From 79a4cbbec94ea72b7c7b44c775f59f02bff3f468 Mon Sep 17 00:00:00 2001
From: Bjorn Neergaard
Date: Thu, 13 Jul 2023 16:48:01 -0600
Subject: [PATCH 01/11] builder-next: make stub executor generic

The current executor is only tested on Linux, so let's be honest about
that. Stubbing this correctly helps avoid incorrectly trying to call
into Linux-only code in e.g. libnetwork.

Signed-off-by: Bjorn Neergaard
---
 .../{executor_unix.go => executor_linux.go} | 2 --
 builder/builder-next/executor_nolinux.go | 33 +++++++++++++++++++
 builder/builder-next/executor_windows.go | 30 -----------------
 3 files changed, 33 insertions(+), 32 deletions(-)
 rename builder/builder-next/{executor_unix.go => executor_linux.go} (99%)
 create mode 100644 builder/builder-next/executor_nolinux.go
 delete mode 100644 builder/builder-next/executor_windows.go

diff --git a/builder/builder-next/executor_unix.go b/builder/builder-next/executor_linux.go
similarity index 99%
rename from builder/builder-next/executor_unix.go
rename to builder/builder-next/executor_linux.go
index cdb88372b3..b672d718bd 100644
--- a/builder/builder-next/executor_unix.go
+++ b/builder/builder-next/executor_linux.go
@@ -1,5 +1,3 @@
-//go:build !windows
-
 package buildkit
 
 import (
diff --git a/builder/builder-next/executor_nolinux.go b/builder/builder-next/executor_nolinux.go
new file mode 100644
index 0000000000..7275822da2
--- /dev/null
+++ b/builder/builder-next/executor_nolinux.go
@@ -0,0 +1,33 @@
+//go:build !linux
+
+package buildkit
+
+import (
+	"context"
+	"errors"
+	"runtime"
+
+	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/libnetwork"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/executor/oci"
+)
+
+func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) {
+	return &stubExecutor{}, nil
+}
+
+type stubExecutor struct{}
+
+func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
+	return errors.New("buildkit executor not implemented for "+runtime.GOOS)
+}
+
+func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
+	return errors.New("buildkit executor not implemented for "+runtime.GOOS)
+}
+
+func getDNSConfig(config.DNSConfig) *oci.DNSConfig {
+	return nil
+}
diff --git a/builder/builder-next/executor_windows.go b/builder/builder-next/executor_windows.go
deleted file mode 100644
index ffe62a2006..0000000000
--- a/builder/builder-next/executor_windows.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package buildkit
-
-import (
-	"context"
-	"errors"
-
-	"github.com/docker/docker/daemon/config"
-	"github.com/docker/docker/libnetwork"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/moby/buildkit/executor"
-	"github.com/moby/buildkit/executor/oci"
-)
-
-func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) {
-	return &winExecutor{}, nil
-}
-
-type winExecutor struct{}
-
-func (w *winExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
-	return errors.New("buildkit executor not implemented for windows")
-}
-
-func (w *winExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {
-	return
errors.New("buildkit executor not implemented for windows") -} - -func getDNSConfig(config.DNSConfig) *oci.DNSConfig { - return nil -} From fd6dd6935b1706ad99cc65b3a74456b680423333 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Tue, 11 Jul 2023 08:26:28 -0600 Subject: [PATCH 02/11] vendor: github.com/containerd/containerd v1.7.6 The DeepEqual ignore required in the daemon tests is a bit ugly, but it works given the new protoc output. We also have to ignore lints related to schema1 deprecations; these do not apply as we must continue to support this schema version. Signed-off-by: Bjorn Neergaard --- .../adapters/containerimage/pull.go | 2 +- daemon/containerd/image_pull.go | 2 +- daemon/runtime_unix_test.go | 11 +- libcontainerd/remote/client.go | 17 +- vendor.mod | 32 +- vendor.sum | 106 +- .../AdamKorcz/go-118-fuzz-build}/LICENSE | 0 .../AdamKorcz/go-118-fuzz-build/testing/f.go | 207 + .../AdamKorcz/go-118-fuzz-build/testing/t.go | 129 + .../testing/unsupported_funcs.go | 42 + .../go-winio/pkg/bindfilter/bind_filter.go | 308 + .../pkg/bindfilter/zsyscall_windows.go | 116 + .../containerd/containerd/.cirrus.yml | 82 + .../containerd/containerd/.golangci.yml | 4 + .../github.com/containerd/containerd/.mailmap | 23 +- .../containerd/containerd/ADOPTERS.md | 6 +- .../containerd/containerd/BUILDING.md | 28 +- .../github.com/containerd/containerd/Makefile | 64 +- .../containerd/containerd/Protobuild.toml | 38 +- .../containerd/containerd/README.md | 53 +- .../containerd/containerd/RELEASES.md | 195 +- .../containerd/containerd/ROADMAP.md | 2 +- .../github.com/containerd/containerd/SCOPE.md | 2 +- .../containerd/containerd/Vagrantfile | 58 +- .../containerd/api/events/container.pb.go | 1736 +-- .../containerd/api/events/container.proto | 3 +- .../api/events/container_fieldpath.pb.go | 95 + .../containerd/api/events/content.pb.go | 481 +- .../containerd/api/events/content.proto | 5 +- .../api/events/content_fieldpath.pb.go | 16 + .../containerd/api/events/image.pb.go | 1361 +-- .../containerd/api/events/image.proto | 2 +- .../api/events/image_fieldpath.pb.go | 62 + .../containerd/api/events/namespace.pb.go | 1361 +-- .../containerd/api/events/namespace.proto | 3 +- .../api/events/namespace_fieldpath.pb.go | 62 + .../containerd/api/events/sandbox.pb.go | 316 + .../containerd/api/events/sandbox.proto | 37 + .../api/events/sandbox_fieldpath.pb.go | 44 + .../containerd/api/events/snapshot.pb.go | 1108 +- .../containerd/api/events/snapshot.proto | 5 +- .../api/events/snapshot_fieldpath.pb.go | 52 + .../containerd/api/events/task.pb.go | 4079 ++----- .../containerd/api/events/task.proto | 9 +- .../api/events/task_fieldpath.pb.go | 191 + .../containerd/api/runtime/sandbox/v1/doc.go | 17 + .../api/runtime/sandbox/v1/sandbox.pb.go | 1480 +++ .../api/runtime/sandbox/v1/sandbox.proto | 136 + .../api/runtime/sandbox/v1/sandbox_grpc.pb.go | 377 + .../runtime/sandbox/v1/sandbox_ttrpc.pb.go | 156 + .../v2/task => api/runtime/task/v2}/doc.go | 0 .../containerd/api/runtime/task/v2/shim.pb.go | 2338 ++++ .../task => api/runtime/task/v2}/shim.proto | 9 +- .../api/runtime/task/v2/shim_ttrpc.pb.go | 301 + .../services/containers/v1/containers.pb.go | 4408 ++------ .../services/containers/v1/containers.proto | 22 +- .../containers/v1/containers_grpc.pb.go | 314 + .../api/services/content/v1/content.pb.go | 6625 +++--------- .../api/services/content/v1/content.proto | 46 +- .../services/content/v1/content_grpc.pb.go | 569 + .../api/services/diff/v1/diff.pb.go | 2039 +--- .../api/services/diff/v1/diff.proto | 6 +- 
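The stub added in the first patch relies on Go's build constraints: the real executor is compiled only on Linux (executor_linux.go), while every other GOOS gets a stub whose methods fail with an error naming the platform instead of reaching Linux-only libnetwork code. A minimal sketch of that pattern follows; the `feature` package, file names, and `Runner` interface are illustrative assumptions, not part of the patch, and the three files are shown in one block (split at the `// file:` markers to build).

```go
// file: runner.go — shared declarations, compiled on every GOOS.
package feature

// Runner is the behaviour both implementations provide.
type Runner interface {
	Run() error
}

// file: runner_linux.go — only built on Linux, where the real work happens.
//go:build linux

package feature

type linuxRunner struct{}

// New returns the real implementation on Linux.
func New() Runner { return linuxRunner{} }

func (linuxRunner) Run() error { return nil } // stand-in for the Linux-only logic

// file: runner_nolinux.go — built everywhere else; callers still compile,
// but get an explicit error naming the platform, mirroring stubExecutor.
//go:build !linux

package feature

import (
	"errors"
	"runtime"
)

type stubRunner struct{}

// New returns a stub so non-Linux builds link without Linux-only dependencies.
func New() Runner { return stubRunner{} }

func (stubRunner) Run() error {
	return errors.New("feature not implemented for " + runtime.GOOS)
}
```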
.../api/services/diff/v1/diff_grpc.pb.go | 151 + .../api/services/events/v1/events.pb.go | 1709 +-- .../api/services/events/v1/events.proto | 5 +- .../api/services/events/v1/events_grpc.pb.go | 238 + .../api/services/images/v1/images.pb.go | 3436 ++---- .../api/services/images/v1/images.proto | 21 +- .../api/services/images/v1/images_grpc.pb.go | 266 + .../introspection/v1/introspection.pb.go | 1865 +--- .../introspection/v1/introspection.proto | 9 +- .../introspection/v1/introspection_grpc.pb.go | 152 + .../api/services/leases/v1/leases.pb.go | 3835 ++----- .../api/services/leases/v1/leases.proto | 9 +- .../api/services/leases/v1/leases_grpc.pb.go | 306 + .../services/namespaces/v1/namespace.pb.go | 3163 ++---- .../services/namespaces/v1/namespace.proto | 13 +- .../namespaces/v1/namespace_grpc.pb.go | 250 + .../containerd/api/services/sandbox/v1/doc.go | 17 + .../api/services/sandbox/v1/sandbox.pb.go | 1948 ++++ .../api/services/sandbox/v1/sandbox.proto | 163 + .../services/sandbox/v1/sandbox_grpc.pb.go | 551 + .../api/services/snapshots/v1/snapshots.pb.go | 6830 +++--------- .../api/services/snapshots/v1/snapshots.proto | 24 +- .../snapshots/v1/snapshots_grpc.pb.go | 458 + .../api/services/streaming/v1/doc.go | 17 + .../api/services/streaming/v1/streaming.pb.go | 175 + .../api/services/streaming/v1/streaming.proto | 31 + .../streaming/v1/streaming_grpc.pb.go | 138 + .../api/services/tasks/v1/tasks.pb.go | 9360 ++++------------- .../api/services/tasks/v1/tasks.proto | 7 +- .../api/services/tasks/v1/tasks_grpc.pb.go | 690 ++ .../api/services/transfer/v1/doc.go | 17 + .../api/services/transfer/v1/transfer.pb.go | 274 + .../api/services/transfer/v1/transfer.proto | 39 + .../services/transfer/v1/transfer_grpc.pb.go | 106 + .../api/services/ttrpc/events/v1/events.pb.go | 982 +- .../api/services/ttrpc/events/v1/events.proto | 5 +- .../ttrpc/events/v1/events_fieldpath.pb.go | 55 + .../ttrpc/events/v1/events_ttrpc.pb.go | 45 + .../api/services/version/v1/version.pb.go | 611 +- .../api/services/version/v1/version.proto | 1 - .../services/version/v1/version_grpc.pb.go | 106 + .../containerd/api/types/descriptor.pb.go | 736 +- .../containerd/api/types/descriptor.proto | 4 +- .../containerd/api/types/metrics.pb.go | 596 +- .../containerd/api/types/metrics.proto | 3 +- .../containerd/api/types/mount.pb.go | 608 +- .../containerd/api/types/mount.proto | 2 - .../containerd/api/types/platform.pb.go | 561 +- .../containerd/api/types/platform.proto | 4 +- .../containerd/api/types/sandbox.pb.go | 346 + .../containerd/api/types/sandbox.proto | 51 + .../containerd/api/types/task/task.pb.go | 1280 +-- .../containerd/api/types/task/task.proto | 20 +- .../containerd/api/types/transfer/doc.go | 18 + .../api/types/transfer/imagestore.pb.go | 451 + .../api/types/transfer/imagestore.proto | 82 + .../api/types/transfer/importexport.pb.go | 320 + .../api/types/transfer/importexport.proto | 52 + .../api/types/transfer/progress.pb.go | 202 + .../api/types/transfer/progress.proto | 29 + .../api/types/transfer/registry.pb.go | 518 + .../api/types/transfer/registry.proto | 79 + .../api/types/transfer/streaming.pb.go | 226 + .../api/types/transfer/streaming.proto | 29 + .../compression/compression_fuzzer.go} | 18 +- .../link_default.go} | 12 +- .../containerd/archive/link_freebsd.go | 82 + .../containerd/containerd/archive/tar.go | 80 +- .../containerd/archive/tar_mostunix.go | 1 - .../containerd/containerd/archive/tar_opts.go | 26 + .../containerd/containerd/archive/tar_unix.go | 5 - .../containerd/archive/tar_windows.go | 
29 +- .../containerd/archive/time_unix.go | 1 - .../containerd/archive/time_windows.go | 15 +- .../containerd/containerd/cio/io.go | 27 +- .../containerd/containerd/cio/io_unix.go | 10 +- .../containerd/containerd/client.go | 62 +- .../containerd/containerd/client_opts.go | 2 + .../containerd/containerd/container.go | 16 +- .../containerd/container_checkpoint_opts.go | 17 +- .../containerd/containerd/container_opts.go | 35 +- .../containerd/container_opts_unix.go | 1 - .../containerd/container_restore_opts.go | 4 +- .../containerd/containerd/containerd.service | 2 + .../containerd/containers/containers.go | 13 +- .../containerd/containerd/containerstore.go | 41 +- .../containerd/containerd/content/content.go | 81 +- .../containerd/containerd/content/helpers.go | 14 +- .../content/local/content_local_fuzzer.go | 76 + .../containerd/content/local/readerat.go | 5 + .../containerd/content/local/store.go | 6 +- .../containerd/content/local/store_bsd.go | 1 - .../containerd/content/local/store_openbsd.go | 1 - .../containerd/content/local/store_unix.go | 1 - .../containerd/content/local/test_helper.go | 38 + .../content/proxy/content_reader.go | 4 +- .../containerd/content/proxy/content_store.go | 47 +- .../content/proxy/content_writer.go | 22 +- .../containerd/defaults/defaults_unix.go | 1 - .../github.com/containerd/containerd/diff.go | 88 +- .../containerd/containerd/diff/diff.go | 19 +- .../containerd/diff/proxy/differ.go | 131 + .../containerd/containerd/diff/stream.go | 12 +- .../containerd/containerd/diff/stream_unix.go | 27 +- .../containerd/diff/stream_windows.go | 26 +- .../containerd/diff/walking/differ.go | 25 +- .../containerd/containerd/events.go | 11 +- .../containerd/containerd/events/events.go | 5 +- .../containerd/events/exchange/exchange.go | 15 +- .../github.com/containerd/containerd/image.go | 27 +- .../containerd/containerd/image_store.go | 57 +- .../containerd/images/archive/exporter.go | 12 + .../containerd/images/archive/importer.go | 7 +- .../containerd/images/archive/reference.go | 3 + .../containerd/images/converter/default.go | 1 + .../containerd/containerd/images/image.go | 4 +- .../containerd/images/mediatypes.go | 31 +- .../containerd/containerd/import.go | 16 +- .../containerd/containerd/install.go | 16 +- .../containerd/containerd/labels/labels.go | 8 + .../containerd/containerd/labels/validate.go | 11 +- .../containerd/leases/proxy/manager.go | 9 +- .../containerd/metadata/adaptors.go | 18 + .../containerd/metadata/boltutil/helpers.go | 29 +- .../containerd/containerd/metadata/buckets.go | 161 +- .../containerd/metadata/containers.go | 13 +- .../containerd/containerd/metadata/content.go | 29 +- .../containerd/containerd/metadata/db.go | 126 +- .../containerd/containerd/metadata/gc.go | 261 +- .../containerd/containerd/metadata/images.go | 17 +- .../containerd/containerd/metadata/sandbox.go | 373 + .../containerd/metadata/snapshot.go | 42 +- .../containerd/mount/fmountat_linux.go | 145 - .../containerd/mount/lookup_unix.go | 1 - .../containerd/mount/lookup_unsupported.go | 1 - .../containerd/mount/losetup_linux.go | 47 +- .../containerd/containerd/mount/mount.go | 47 +- .../containerd/mount/mount_freebsd.go | 15 +- .../containerd/mount/mount_linux.go | 78 +- .../containerd/containerd/mount/mount_unix.go | 53 +- .../containerd/mount/mount_unsupported.go | 46 + .../containerd/mount/mount_windows.go | 121 +- .../containerd/containerd/mount/temp.go | 2 +- .../containerd/containerd/mount/temp_unix.go | 1 - .../containerd/mount/temp_unsupported.go | 1 - 
.../containerd/containerd/namespaces.go | 6 +- .../containerd/containerd/oci/mounts.go | 4 +- .../containerd/oci/mounts_freebsd.go | 29 +- .../containerd/containerd/oci/spec.go | 23 +- .../containerd/containerd/oci/spec_opts.go | 231 +- .../containerd/oci/spec_opts_linux.go | 78 - .../containerd/oci/spec_opts_nonlinux.go | 16 - .../containerd/oci/spec_opts_unix.go | 8 - .../containerd/oci/spec_opts_windows.go | 70 +- .../containerd/containerd/oci/utils_unix.go | 1 - .../containerd/containerd/oss_fuzz.go | 26 + .../pkg/apparmor/apparmor_unsupported.go | 1 - .../containerd/pkg/cap/cap_linux.go | 9 +- .../containerd/pkg/cleanup/context.go | 52 + .../containerd/pkg/dialer/dialer_unix.go | 1 - .../{sys/epoll.go => pkg/epoch/context.go} | 35 +- .../containerd/containerd/pkg/epoch/epoch.go | 69 + .../pkg/runtimeoptions/v1/api.pb.go | 504 +- .../pkg/runtimeoptions/v1/api.proto | 14 +- .../pkg/seccomp/seccomp_unsupported.go | 1 - .../containerd/pkg/shutdown/shutdown.go | 6 + .../containerd/pkg/streaming/streaming.go | 47 + .../containerd/pkg/transfer/proxy/transfer.go | 122 + .../pkg/transfer/streaming/stream.go | 210 + .../pkg/transfer/streaming/writer.go | 130 + .../containerd/pkg/transfer/transfer.go | 136 + .../containerd/pkg/ttrpcutil/client.go | 5 +- .../containerd/pkg/unpack/unpacker.go | 537 + .../pkg/userns/userns_unsupported.go | 1 - .../containerd/platforms/cpuinfo_other.go | 1 - .../containerd/platforms/defaults_darwin.go | 1 - .../containerd/platforms/defaults_freebsd.go | 43 + .../containerd/platforms/defaults_unix.go | 3 +- .../containerd/platforms/defaults_windows.go | 40 +- .../containerd/platforms/platforms.go | 23 +- .../containerd/platforms/platforms_other.go | 34 + .../containerd/platforms/platforms_windows.go | 42 + .../containerd/containerd/plugin/plugin.go | 12 + .../containerd/plugin/plugin_go18.go | 1 - .../containerd/plugin/plugin_other.go | 1 - .../containerd/containerd/process.go | 5 +- .../containerd/containerd/protobuf/any.go | 47 + .../containerd/containerd/protobuf/compare.go | 41 + .../plugin/doc.go} | 2 + .../protobuf/plugin/fieldpath.pb.go | 144 + .../protobuf/plugin/fieldpath.proto | 42 + .../containerd/protobuf/proto/proto.go | 30 + .../containerd/protobuf/timestamp.go | 36 + .../containerd/protobuf/types/types.go | 28 + .../github.com/containerd/containerd/pull.go | 98 +- .../containerd/reference/docker/helpers.go | 58 + .../containerd/reference/docker/normalize.go | 196 + .../containerd/reference/docker/reference.go | 352 +- .../containerd/reference/docker/regexp.go | 191 + .../containerd/reference/docker/sort.go | 73 + .../containerd/remotes/docker/auth/fetch.go | 9 +- .../containerd/remotes/docker/authorizer.go | 10 +- .../remotes/docker/converter_fuzz.go | 55 + .../containerd/remotes/docker/fetcher.go | 108 +- .../containerd/remotes/docker/fetcher_fuzz.go | 81 + .../containerd/remotes/docker/handler.go | 7 +- .../containerd/remotes/docker/pusher.go | 3 + .../containerd/remotes/docker/resolver.go | 64 +- .../remotes/docker/schema1/converter.go | 12 +- .../containerd/remotes/errors/errors.go | 2 +- .../containerd/containerd/remotes/handlers.go | 95 +- .../containerd/containerd/remotes/resolver.go | 16 +- .../containerd/containerd/rootfs/diff.go | 17 +- .../containerd/rootfs/init_other.go | 1 - .../runtime/linux/runctypes/next.pb.txt | 5 +- .../runtime/linux/runctypes/runc.pb.go | 2298 +--- .../runtime/linux/runctypes/runc.proto | 10 +- .../runtime/v2/runc/options/next.pb.txt | 5 +- .../runtime/v2/runc/options/oci.pb.go | 1789 +--- 
.../runtime/v2/runc/options/oci.proto | 11 +- .../containerd/runtime/v2/shim/publisher.go | 6 +- .../containerd/runtime/v2/shim/shim.go | 62 +- .../containerd/runtime/v2/shim/shim_unix.go | 3 +- .../runtime/v2/shim/shim_windows.go | 14 +- .../containerd/runtime/v2/shim/util.go | 47 +- .../containerd/runtime/v2/shim/util_unix.go | 22 +- .../containerd/runtime/v2/task/shim.pb.go | 7312 ------------- .../containerd/containerd/sandbox.go | 247 + .../containerd/containerd/sandbox/bridge.go | 77 + .../containerd/sandbox/controller.go | 125 + .../containerd/containerd/sandbox/helpers.go | 68 + .../containerd/sandbox/proxy/controller.go | 142 + .../containerd/sandbox/proxy/store.go | 95 + .../containerd/containerd/sandbox/store.go | 116 + .../containerd/containerd/services.go | 99 + .../content/contentserver/contentserver.go | 104 +- .../services/introspection/introspection.go | 4 +- .../services/introspection/local.go | 45 +- .../introspection/pidns_linux.go} | 23 +- .../services/introspection/pidns_others.go | 23 + .../services/introspection/service.go | 5 +- .../services/server/config/config.go | 46 +- .../containerd/services/services.go | 4 +- .../snapshots/overlay/overlayutils/check.go | 1 - .../containerd/snapshots/proxy/proxy.go | 30 +- .../containerd/snapshots/snapshotter.go | 42 +- .../containerd/snapshotter_default_unix.go | 1 - .../containerd/snapshotter_opts_unix.go | 82 +- .../containerd/snapshotter_opts_windows.go | 27 + .../sys/filesys_deprecated_windows.go | 51 + .../containerd/containerd/sys/filesys_unix.go | 6 - .../containerd/sys/filesys_windows.go | 282 +- .../containerd/sys/oom_unsupported.go | 1 - .../containerd/sys/reaper/reaper_unix.go | 3 +- .../containerd/containerd/sys/socket_unix.go | 1 - .../github.com/containerd/containerd/task.go | 23 +- .../containerd/containerd/task_opts.go | 4 +- .../containerd/containerd/task_opts_unix.go | 1 - .../containerd/containerd/tracing/helpers.go | 94 + .../containerd/containerd/tracing/log.go | 64 - .../containerd/containerd/tracing/tracing.go | 98 +- .../containerd/containerd/transfer.go | 109 + .../containerd/containerd/unpacker.go | 427 - .../containerd/containerd/version/version.go | 2 +- .../golang/protobuf/descriptor/descriptor.go | 180 - .../grpc-gateway/internal/BUILD.bazel | 23 - .../grpc-gateway/internal/errors.pb.go | 189 - .../grpc-gateway/internal/errors.proto | 26 - .../grpc-gateway/runtime/BUILD.bazel | 85 - .../grpc-gateway/runtime/errors.go | 186 - .../grpc-gateway/runtime/fieldmask.go | 89 - .../grpc-gateway/runtime/mux.go | 300 - .../grpc-gateway/runtime/proto_errors.go | 106 - .../grpc-gateway/runtime/query.go | 406 - .../grpc-gateway/utilities/BUILD.bazel | 21 - .../grpc-gateway/{ => v2}/LICENSE.txt | 0 .../v2/internal/httprule/BUILD.bazel | 35 + .../v2/internal/httprule/compile.go | 121 + .../grpc-gateway/v2/internal/httprule/fuzz.go | 11 + .../v2/internal/httprule/parse.go | 369 + .../v2/internal/httprule/types.go | 60 + .../grpc-gateway/v2/runtime/BUILD.bazel | 97 + .../grpc-gateway/{ => v2}/runtime/context.go | 83 +- .../grpc-gateway/{ => v2}/runtime/convert.go | 62 +- .../grpc-gateway/{ => v2}/runtime/doc.go | 0 .../grpc-gateway/v2/runtime/errors.go | 181 + .../grpc-gateway/v2/runtime/fieldmask.go | 165 + .../grpc-gateway/{ => v2}/runtime/handler.go | 83 +- .../{ => v2}/runtime/marshal_httpbodyproto.go | 21 +- .../{ => v2}/runtime/marshal_json.go | 2 +- .../{ => v2}/runtime/marshal_jsonpb.go | 169 +- .../{ => v2}/runtime/marshal_proto.go | 5 +- .../{ => v2}/runtime/marshaler.go | 11 +- .../{ => 
v2}/runtime/marshaler_registry.go | 12 +- .../grpc-gateway/v2/runtime/mux.go | 442 + .../grpc-gateway/{ => v2}/runtime/pattern.go | 203 +- .../{ => v2}/runtime/proto2_convert.go | 2 +- .../grpc-gateway/v2/runtime/query.go | 342 + .../grpc-gateway/v2/utilities/BUILD.bazel | 31 + .../grpc-gateway/{ => v2}/utilities/doc.go | 0 .../{ => v2}/utilities/pattern.go | 0 .../{ => v2}/utilities/readerfactory.go | 0 .../v2/utilities/string_array_flag.go | 33 + .../grpc-gateway/{ => v2}/utilities/trie.go | 5 +- .../grpc/otelgrpc/{grpctrace.go => config.go} | 99 +- .../grpc/otelgrpc/interceptor.go | 164 +- .../grpc/otelgrpc/interceptorinfo.go | 50 + .../grpc/otelgrpc/internal/parse.go | 8 +- .../grpc/otelgrpc/metadata_supplier.go | 98 + .../grpc/otelgrpc/semconv.go | 6 +- .../grpc/otelgrpc/version.go | 4 +- .../net/http/httptrace/otelhttptrace/api.go | 2 +- .../httptrace/otelhttptrace/clienttrace.go | 20 +- .../http/httptrace/otelhttptrace/httptrace.go | 20 +- .../http/httptrace/otelhttptrace/version.go | 4 +- .../net/http/otelhttp/client.go | 20 +- .../net/http/otelhttp/common.go | 4 +- .../net/http/otelhttp/config.go | 40 +- .../net/http/otelhttp/handler.go | 78 +- .../net/http/otelhttp/labeler.go | 2 +- .../net/http/otelhttp/transport.go | 17 +- .../net/http/otelhttp/version.go | 4 +- .../instrumentation/net/http/otelhttp/wrap.go | 5 +- vendor/go.opentelemetry.io/otel/.gitignore | 2 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 217 +- vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdown-link.json | 16 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 638 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 7 +- vendor/go.opentelemetry.io/otel/Makefile | 59 +- vendor/go.opentelemetry.io/otel/README.md | 50 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 43 +- .../otel/attribute/encoder.go | 86 +- .../otel/attribute/iterator.go | 80 +- .../go.opentelemetry.io/otel/attribute/set.go | 151 +- .../otel/attribute/value.go | 97 +- .../otel/baggage/baggage.go | 30 +- .../go.opentelemetry.io/otel/codes/codes.go | 10 + .../otel/exporters/otlp/internal/config.go | 34 + .../otlp/internal/envconfig/envconfig.go | 199 + .../otel/exporters/otlp/internal/header.go | 24 + .../exporters/otlp/internal/partialsuccess.go | 64 + .../exporters/otlp/internal/retry/retry.go | 35 +- .../exporters/otlp/internal/wrappederror.go | 61 + .../otel/exporters/otlp/otlptrace/README.md | 20 +- .../otel/exporters/otlp/otlptrace/exporter.go | 18 +- .../internal/otlpconfig/envconfig.go | 249 +- .../otlptrace/internal/otlpconfig/options.go | 28 +- .../internal/otlpconfig/optiontypes.go | 9 +- .../internal/tracetransform/attribute.go | 4 +- .../tracetransform/instrumentation.go | 6 +- .../otlptrace/internal/tracetransform/span.go | 74 +- .../otlp/otlptrace/otlptracegrpc/client.go | 27 +- .../otlp/otlptrace/otlptracegrpc/options.go | 3 +- .../otlp/otlptrace/otlptracehttp/client.go | 98 +- .../otlp/otlptrace/otlptracehttp/options.go | 10 +- vendor/go.opentelemetry.io/otel/handler.go | 28 +- .../otel/internal/attribute/attribute.go | 111 + .../otel/internal/baggage/context.go | 9 +- .../otel/internal/global/internal_logging.go | 34 +- .../otel/internal/global/state.go | 53 +- .../otel/internal/metric/global/meter.go | 287 - .../otel/internal/metric/registry/registry.go | 139 - .../otel/internal/rawhelpers.go | 2 +- .../go.opentelemetry.io/otel/metric/config.go | 88 +- vendor/go.opentelemetry.io/otel/metric/doc.go | 44 - .../otel/metric/global/global.go | 42 + 
.../otel/metric/global/metric.go | 49 - .../otel/metric/instrument/asyncfloat64.go | 130 + .../otel/metric/instrument/asyncint64.go | 130 + .../otel/metric/instrument/instrument.go | 88 + .../otel/metric/instrument/syncfloat64.go | 85 + .../otel/metric/instrument/syncint64.go | 85 + .../metric/internal/global/instruments.go | 355 + .../otel/metric/internal/global/meter.go | 354 + .../internal/global/state.go} | 54 +- .../go.opentelemetry.io/otel/metric/meter.go | 138 + .../go.opentelemetry.io/otel/metric/metric.go | 538 - .../otel/metric/metric_instrument.go | 464 - .../go.opentelemetry.io/otel/metric/noop.go | 127 +- .../otel/metric/number/kind_string.go | 24 - .../otel/metric/number/number.go | 538 - .../otel/metric/sdkapi/descriptor.go | 70 - .../otel/metric/sdkapi/instrumentkind.go | 80 - .../metric/sdkapi/instrumentkind_string.go | 28 - .../otel/metric/sdkapi/noop.go | 66 - .../otel/metric/sdkapi/sdkapi.go | 159 - .../otel/sdk/instrumentation/doc.go | 24 + .../otel/sdk/instrumentation/library.go | 18 +- .../doc.go => sdk/instrumentation/scope.go} | 20 +- .../otel/sdk/internal/env/env.go | 117 +- .../otel/sdk/resource/auto.go | 2 +- .../otel/sdk/resource/builtin.go | 10 +- .../otel/sdk/resource/config.go | 32 +- .../otel/sdk/resource/container.go | 100 + .../otel/sdk/resource/env.go | 19 +- .../otel/sdk/resource/os.go | 4 +- .../otel/sdk/resource/os_unix.go | 20 +- .../otel/sdk/resource/process.go | 25 +- .../otel/sdk/resource/resource.go | 12 +- .../otel/sdk/trace/batch_span_processor.go | 84 +- .../otel/sdk/trace/config.go | 68 - .../otel/sdk/trace/evictedqueue.go | 7 +- .../otel/sdk/trace/id_generator.go | 6 +- .../otel/sdk/trace/provider.go | 201 +- .../otel/sdk/trace/sampler_env.go | 108 + .../otel/sdk/trace/sampling.go | 19 +- .../otel/sdk/trace/simple_span_processor.go | 2 +- .../otel/sdk/trace/snapshot.go | 40 +- .../otel/sdk/trace/span.go | 185 +- .../otel/sdk/trace/span_limits.go | 125 + .../otel/sdk/trace/span_processor.go | 5 + .../otel/sdk/trace/tracer.go | 11 +- .../otel/sdk/trace/tracetest/recorder.go | 1 + .../otel/sdk/trace/tracetest/span.go | 74 +- .../otel/semconv/internal/http.go | 336 + .../otel/semconv/internal/v2/http.go | 404 + .../otel/semconv/internal/v2/net.go | 324 + .../{metric/number => semconv/v1.17.0}/doc.go | 15 +- .../otel/semconv/v1.17.0/event.go | 199 + .../unit.go => semconv/v1.17.0/exception.go} | 10 +- .../unit/doc.go => semconv/v1.17.0/http.go} | 13 +- .../otel/semconv/v1.17.0/httpconv/http.go | 150 + .../otel/semconv/v1.17.0/netconv/net.go | 66 + .../otel/semconv/v1.17.0/resource.go | 2010 ++++ .../otel/semconv/v1.17.0/schema.go | 20 + .../otel/semconv/v1.17.0/trace.go | 3375 ++++++ .../otel/semconv/v1.7.0/http.go | 274 +- .../otel/semconv/v1.7.0/schema.go | 2 +- vendor/go.opentelemetry.io/otel/trace.go | 7 +- .../go.opentelemetry.io/otel/trace/config.go | 19 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- vendor/go.opentelemetry.io/otel/trace/noop.go | 2 +- .../go.opentelemetry.io/otel/trace/trace.go | 84 +- .../otel/trace/tracestate.go | 29 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 21 +- .../collector/trace/v1/trace_config.pb.go | 573 - .../collector/trace/v1/trace_service.pb.go | 191 +- .../collector/trace/v1/trace_service.pb.gw.go | 14 +- .../trace/v1/trace_service_grpc.pb.go | 12 +- .../proto/otlp/common/v1/common.pb.go | 194 +- .../proto/otlp/resource/v1/resource.pb.go | 31 +- .../proto/otlp/trace/v1/trace.pb.go | 342 +- vendor/modules.txt | 101 +- 508 files changed, 53300 
insertions(+), 65747 deletions(-) rename vendor/{go.opentelemetry.io/otel/internal/metric => github.com/AdamKorcz/go-118-fuzz-build}/LICENSE (100%) create mode 100644 vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go create mode 100644 vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go create mode 100644 vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go create mode 100644 vendor/github.com/containerd/containerd/.cirrus.yml create mode 100644 vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/sandbox.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/sandbox.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/sandbox_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto create mode 100644 vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go rename vendor/github.com/containerd/containerd/{runtime/v2/task => api/runtime/task/v2}/doc.go (100%) create mode 100644 vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go rename vendor/github.com/containerd/containerd/{runtime/v2/task => api/runtime/task/v2}/shim.proto (91%) create mode 100644 vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto create mode 100644 
vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto create mode 100644 vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto create mode 100644 vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/sandbox.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/sandbox.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/progress.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/registry.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto rename vendor/github.com/containerd/containerd/{mount/subprocess_unsafe_linux.go => archive/compression/compression_fuzzer.go} (68%) rename vendor/github.com/containerd/containerd/{sys/userns_deprecated.go => archive/link_default.go} (68%) create mode 100644 vendor/github.com/containerd/containerd/archive/link_freebsd.go create mode 100644 vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go create mode 100644 vendor/github.com/containerd/containerd/content/local/test_helper.go create mode 100644 vendor/github.com/containerd/containerd/diff/proxy/differ.go create mode 100644 vendor/github.com/containerd/containerd/metadata/sandbox.go delete mode 100644 vendor/github.com/containerd/containerd/mount/fmountat_linux.go create mode 100644 vendor/github.com/containerd/containerd/mount/mount_unsupported.go create mode 100644 vendor/github.com/containerd/containerd/oss_fuzz.go create mode 100644 
vendor/github.com/containerd/containerd/pkg/cleanup/context.go rename vendor/github.com/containerd/containerd/{sys/epoll.go => pkg/epoch/context.go} (53%) create mode 100644 vendor/github.com/containerd/containerd/pkg/epoch/epoch.go create mode 100644 vendor/github.com/containerd/containerd/pkg/streaming/streaming.go create mode 100644 vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go create mode 100644 vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go create mode 100644 vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go create mode 100644 vendor/github.com/containerd/containerd/pkg/transfer/transfer.go create mode 100644 vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms_other.go create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms_windows.go create mode 100644 vendor/github.com/containerd/containerd/protobuf/any.go create mode 100644 vendor/github.com/containerd/containerd/protobuf/compare.go rename vendor/github.com/containerd/containerd/{mount/subprocess_unsafe_linux.s => protobuf/plugin/doc.go} (97%) create mode 100644 vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go create mode 100644 vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto create mode 100644 vendor/github.com/containerd/containerd/protobuf/proto/proto.go create mode 100644 vendor/github.com/containerd/containerd/protobuf/timestamp.go create mode 100644 vendor/github.com/containerd/containerd/protobuf/types/types.go create mode 100644 vendor/github.com/containerd/containerd/reference/docker/helpers.go create mode 100644 vendor/github.com/containerd/containerd/reference/docker/normalize.go create mode 100644 vendor/github.com/containerd/containerd/reference/docker/regexp.go create mode 100644 vendor/github.com/containerd/containerd/reference/docker/sort.go create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go delete mode 100644 vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go create mode 100644 vendor/github.com/containerd/containerd/sandbox.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/bridge.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/controller.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/helpers.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/proxy/controller.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/proxy/store.go create mode 100644 vendor/github.com/containerd/containerd/sandbox/store.go rename vendor/github.com/containerd/containerd/{sys/fds.go => services/introspection/pidns_linux.go} (65%) create mode 100644 vendor/github.com/containerd/containerd/services/introspection/pidns_others.go create mode 100644 vendor/github.com/containerd/containerd/snapshotter_opts_windows.go create mode 100644 vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go create mode 100644 vendor/github.com/containerd/containerd/tracing/helpers.go create mode 100644 vendor/github.com/containerd/containerd/transfer.go delete mode 100644 vendor/github.com/containerd/containerd/unpacker.go delete mode 100644 
vendor/github.com/golang/protobuf/descriptor/descriptor.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/LICENSE.txt (100%) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/context.go (74%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/convert.go (80%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/doc.go (100%) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/handler.go (74%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshal_httpbodyproto.go (54%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshal_json.go (95%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshal_jsonpb.go (57%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshal_proto.go (93%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshaler.go (80%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/marshaler_registry.go (91%) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/pattern.go (57%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/runtime/proto2_convert.go (98%) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/utilities/doc.go (100%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/utilities/pattern.go (100%) rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/utilities/readerfactory.go (100%) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go rename vendor/github.com/grpc-ecosystem/grpc-gateway/{ => v2}/utilities/trie.go (98%) rename 
vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/{grpctrace.go => config.go} (58%) create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore delete mode 100644 vendor/go.opentelemetry.io/otel/.markdown-link.json create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/global/global.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/global/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go rename vendor/go.opentelemetry.io/otel/{internal/metric/global/metric.go => metric/internal/global/state.go} (54%) create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/metric_instrument.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/kind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/number/number.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go delete mode 100644 vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go rename vendor/go.opentelemetry.io/otel/{internal/metric/registry/doc.go => sdk/instrumentation/scope.go} (56%) create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go rename 
vendor/go.opentelemetry.io/otel/{metric/number => semconv/v1.17.0}/doc.go (60%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go rename vendor/go.opentelemetry.io/otel/{metric/unit/unit.go => semconv/v1.17.0/exception.go} (75%) rename vendor/go.opentelemetry.io/otel/{metric/unit/doc.go => semconv/v1.17.0/http.go} (65%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/netconv/net.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index 7461107357..890261ce39 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -19,7 +19,7 @@ import ( ctdreference "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. distreference "github.com/distribution/reference" dimages "github.com/docker/docker/daemon/images" "github.com/docker/docker/distribution/metadata" diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index dfd3be44bb..5ad764df8f 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -115,7 +115,7 @@ func (i *ImageService) PullImage(ctx context.Context, ref reference.Named, platf // Allow pulling application/vnd.docker.distribution.manifest.v1+prettyjws images // by converting them to OCI manifests. - opts = append(opts, containerd.WithSchema1Conversion) + opts = append(opts, containerd.WithSchema1Conversion) //nolint:staticcheck // Ignore SA1019: containerd.WithSchema1Conversion is deprecated: use Schema 2 or OCI images. img, err := i.client.Pull(ctx, ref.String(), opts...) 
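The two `//nolint:staticcheck` hunks above keep the legacy schema 1 pull path compiling against containerd v1.7, where `WithSchema1Conversion` is marked deprecated (SA1019) but is still needed to convert old Docker manifest v1 images. A hedged, self-contained sketch of that call path follows; the socket path, namespace, and image reference are illustrative assumptions, not values taken from this patch.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Connect to a local containerd; the socket path is an assumption.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "moby")

	// WithSchema1Conversion is deprecated in containerd v1.7, but the daemon
	// must keep accepting schema 1 images, hence the lint suppression.
	img, err := client.Pull(ctx, "registry.example.com/legacy/image:tag",
		containerd.WithPullUnpack,
		containerd.WithSchema1Conversion, //nolint:staticcheck // SA1019: still required for schema 1 images
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("pulled", img.Name())
}
```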
if err != nil { diff --git a/daemon/runtime_unix_test.go b/daemon/runtime_unix_test.go index 6481a4a7cf..2566c480c7 100644 --- a/daemon/runtime_unix_test.go +++ b/daemon/runtime_unix_test.go @@ -14,7 +14,9 @@ import ( "github.com/docker/docker/api/types/system" "github.com/docker/docker/daemon/config" "github.com/docker/docker/errdefs" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/imdario/mergo" + "google.golang.org/protobuf/proto" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" ) @@ -242,11 +244,11 @@ func TestGetRuntime(t *testing.T) { assert.Assert(t, ok, "stock runtime could not be found (test needs to be updated)") stockRuntime.Features = nil - configdOpts := *stockRuntime.Opts.(*v2runcoptions.Options) + configdOpts := proto.Clone(stockRuntime.Opts.(*v2runcoptions.Options)).(*v2runcoptions.Options) configdOpts.BinaryName = configuredRuntime.Path wantConfigdRuntime := &shimConfig{ Shim: stockRuntime.Shim, - Opts: &configdOpts, + Opts: configdOpts, } for _, tt := range []struct { @@ -334,7 +336,10 @@ func TestGetRuntime(t *testing.T) { if tt.want != nil { assert.Check(t, err) got := &shimConfig{Shim: shim, Opts: opts} - assert.Check(t, is.DeepEqual(got, tt.want)) + assert.Check(t, is.DeepEqual(got, tt.want, + cmpopts.IgnoreUnexported(runtimeoptions_v1.Options{}), + cmpopts.IgnoreUnexported(v2runcoptions.Options{}), + )) } else { assert.Check(t, is.Equal(shim, "")) assert.Check(t, is.Nil(opts)) diff --git a/libcontainerd/remote/client.go b/libcontainerd/remote/client.go index 23b16ea06c..d3816223e5 100644 --- a/libcontainerd/remote/client.go +++ b/libcontainerd/remote/client.go @@ -22,6 +22,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/protobuf" v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/typeurl/v2" "github.com/docker/docker/errdefs" @@ -29,11 +30,13 @@ import ( libcontainerdtypes "github.com/docker/docker/libcontainerd/types" "github.com/docker/docker/pkg/ioutils" "github.com/hashicorp/go-multierror" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // DockerContainerBundlePath is the label key pointing to the container's bundle path @@ -163,7 +166,7 @@ func (c *container) Start(ctx context.Context, checkpointDir string, withStdin b // remove the checkpoint when we're done defer func() { if checkpoint != nil { - err := c.client.client.ContentStore().Delete(ctx, checkpoint.Digest) + err := c.client.client.ContentStore().Delete(ctx, digest.Digest(checkpoint.Digest)) if err != nil { c.client.logger.WithError(err).WithFields(log.Fields{ "ref": checkpointDir, @@ -205,10 +208,10 @@ func (c *container) Start(ctx context.Context, checkpointDir string, withStdin b if runtime.GOOS != "windows" { taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error { if c.v2runcoptions != nil { - opts := *c.v2runcoptions + opts := proto.Clone(c.v2runcoptions).(*v2runcoptions.Options) opts.IoUid = uint32(uid) opts.IoGid = uint32(gid) - info.Options = &opts + info.Options = opts } return nil }) @@ -342,7 +345,7 @@ func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) { if err != nil { return nil, err } - return 
libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil + return libcontainerdtypes.InterfaceToStats(protobuf.FromTimestamp(m.Timestamp), v), nil } func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) { @@ -675,7 +678,7 @@ func (c *client) processEventStream(ctx context.Context, ns string) { ProcessID: t.ID, Pid: t.Pid, ExitCode: t.ExitStatus, - ExitedAt: t.ExitedAt, + ExitedAt: protobuf.FromTimestamp(t.ExitedAt), }) case *apievents.TaskOOM: c.processEvent(ctx, libcontainerdtypes.EventOOM, libcontainerdtypes.EventInfo{ @@ -734,8 +737,8 @@ func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.R } return &types.Descriptor{ MediaType: mediaType, - Digest: writer.Digest(), - Size_: size, + Digest: writer.Digest().Encoded(), + Size: size, }, nil } diff --git a/vendor.mod b/vendor.mod index 3baf9afaa9..a178966d8e 100644 --- a/vendor.mod +++ b/vendor.mod @@ -26,7 +26,7 @@ require ( github.com/cloudflare/cfssl v1.6.4 github.com/container-orchestrated-devices/container-device-interface v0.6.1 github.com/containerd/cgroups/v3 v3.0.2 - github.com/containerd/containerd v1.6.24 + github.com/containerd/containerd v1.7.6 github.com/containerd/continuity v0.4.2 github.com/containerd/fifo v1.1.0 github.com/containerd/typeurl/v2 v2.1.1 @@ -91,12 +91,12 @@ require ( github.com/vishvananda/netlink v1.2.1-beta.2 github.com/vishvananda/netns v0.0.4 go.etcd.io/bbolt v1.3.7 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 - go.opentelemetry.io/otel v1.4.1 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 - go.opentelemetry.io/otel/sdk v1.4.1 - go.opentelemetry.io/otel/trace v1.4.1 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 + go.opentelemetry.io/otel v1.14.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 + go.opentelemetry.io/otel/sdk v1.14.0 + go.opentelemetry.io/otel/trace v1.14.0 golang.org/x/mod v0.10.0 golang.org/x/net v0.10.0 golang.org/x/sync v0.3.0 @@ -105,6 +105,7 @@ require ( golang.org/x/time v0.3.0 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 google.golang.org/grpc v1.56.2 + google.golang.org/protobuf v1.31.0 gotest.tools/v3 v3.5.0 resenje.org/singleflight v0.4.0 ) @@ -113,6 +114,7 @@ require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/longrunning v0.4.1 // indirect + github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/armon/go-metrics v0.4.1 // indirect @@ -155,7 +157,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect @@ -190,13 +192,12 @@ require ( go.etcd.io/etcd/raft/v3 v3.5.6 // indirect go.etcd.io/etcd/server/v3 v3.5.6 // indirect go.opencensus.io v0.24.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect - go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect - go.opentelemetry.io/otel/metric v0.27.0 // indirect - go.opentelemetry.io/proto/otlp v0.12.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect @@ -205,7 +206,6 @@ require ( golang.org/x/tools v0.8.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.90.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/vendor.sum b/vendor.sum index d4af43e105..44cf660fd8 100644 --- a/vendor.sum +++ b/vendor.sum @@ -19,6 +19,8 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -26,6 +28,7 @@ cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNF cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= @@ -46,6 +49,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock 
v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= @@ -59,6 +63,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= @@ -218,8 +224,8 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= @@ -239,7 +245,6 @@ github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGr github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -303,8 +308,8 @@ github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.6.24 h1:HKF4bfN7WoCk+/n/hi3OrTKJlPWxZmeg1uVDRpEPlXA= -github.com/containerd/containerd v1.6.24/go.mod 
h1:06DkIUikjOcYdqFgOXDwBHO+qR4/qfbMPQ9XxtAGs1c= +github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= +github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= @@ -444,7 +449,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE= @@ -545,6 +549,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYGpCqc3M3zdTz+OHQHRc09FeYk= github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -559,6 +565,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -614,6 +621,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -635,12 +643,14 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= @@ -699,8 +709,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY= @@ -1188,8 +1200,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1313,52 +1325,48 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 h1:ZjF6qLnAVNq6xUh0sK2mCEqwnRrpgr0mLALQXJL34NI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0/go.mod h1:SD34NWTW0VMH2VvFVfArHPoF+L1ddT4MOQCTb2l8T5I= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= -go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= -go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= 
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 h1:AxqDiGk8CorEXStMDZF5Hz9vo9Z7ZZ+I5m8JRl/ko40= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 h1:8qOago/OqoFclMUUj/184tZyRdDZFpcejSjbk5Jrl6Y= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= -go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= -go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= -go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y= -go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= -go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= -go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= -go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -1473,7 +1481,9 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1498,6 +1508,7 @@ golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= @@ -1576,11 +1587,13 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1706,8 +1719,12 @@ golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1746,6 +1763,9 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ 
-1791,9 +1811,15 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1812,6 +1838,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -1819,8 +1847,7 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ 
-1889,6 +1916,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE b/vendor/github.com/AdamKorcz/go-118-fuzz-build/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/otel/internal/metric/LICENSE rename to vendor/github.com/AdamKorcz/go-118-fuzz-build/LICENSE diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go new file mode 100644 index 0000000000..3f4d9aeb61 --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go @@ -0,0 +1,207 @@ +package testing + +import ( + "fmt" + fuzz "github.com/AdaLogics/go-fuzz-headers" + "os" + "reflect" +) + +type F struct { + Data []byte + T *T + FuzzFunc func(*T, any) +} + +func (f *F) CleanupTempDirs() { + f.T.CleanupTempDirs() +} + +func (f *F) Add(args ...any) {} +func (c *F) Cleanup(f func()) {} +func (c *F) Error(args ...any) {} +func (c *F) Errorf(format string, args ...any) {} +func (f *F) Fail() {} +func (c *F) FailNow() {} +func (c *F) Failed() bool { return false } +func (c *F) Fatal(args ...any) {} +func (c *F) Fatalf(format string, args ...any) {} +func (f *F) Fuzz(ff any) { + // we are assuming that ff is a func. 
+ // TODO: Add a check for UX purposes + + fn := reflect.ValueOf(ff) + fnType := fn.Type() + var types []reflect.Type + for i := 1; i < fnType.NumIn(); i++ { + t := fnType.In(i) + + types = append(types, t) + } + args := []reflect.Value{reflect.ValueOf(f.T)} + fuzzConsumer := fuzz.NewConsumer(f.Data) + for _, v := range types { + switch v.String() { + case "[]uint8": + b, err := fuzzConsumer.GetBytes() + if err != nil { + return + } + newBytes := reflect.New(v) + newBytes.Elem().SetBytes(b) + args = append(args, newBytes.Elem()) + case "string": + s, err := fuzzConsumer.GetString() + if err != nil { + return + } + newString := reflect.New(v) + newString.Elem().SetString(s) + args = append(args, newString.Elem()) + case "int": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int8": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int16": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int32": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int64": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "uint": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newUint := reflect.New(v) + newUint.Elem().SetUint(uint64(randInt)) + args = append(args, newUint.Elem()) + case "uint8": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newUint := reflect.New(v) + newUint.Elem().SetUint(uint64(randInt)) + args = append(args, newUint.Elem()) + case "uint16": + randInt, err := fuzzConsumer.GetUint16() + if err != nil { + return + } + newUint16 := reflect.New(v) + newUint16.Elem().SetUint(uint64(randInt)) + args = append(args, newUint16.Elem()) + case "uint32": + randInt, err := fuzzConsumer.GetUint32() + if err != nil { + return + } + newUint32 := reflect.New(v) + newUint32.Elem().SetUint(uint64(randInt)) + args = append(args, newUint32.Elem()) + case "uint64": + randInt, err := fuzzConsumer.GetUint64() + if err != nil { + return + } + newUint64 := reflect.New(v) + newUint64.Elem().SetUint(uint64(randInt)) + args = append(args, newUint64.Elem()) + case "rune": + randRune, err := fuzzConsumer.GetRune() + if err != nil { + return + } + newRune := reflect.New(v) + newRune.Elem().Set(reflect.ValueOf(randRune)) + args = append(args, newRune.Elem()) + case "float32": + randFloat, err := fuzzConsumer.GetFloat32() + if err != nil { + return + } + newFloat := reflect.New(v) + newFloat.Elem().Set(reflect.ValueOf(randFloat)) + args = append(args, newFloat.Elem()) + case "float64": + randFloat, err := fuzzConsumer.GetFloat64() + if err != nil { + return + } + newFloat := reflect.New(v) + newFloat.Elem().Set(reflect.ValueOf(randFloat)) + args = append(args, newFloat.Elem()) + case "bool": + randBool, err := fuzzConsumer.GetBool() + if err != nil { + return + } + newBool := reflect.New(v) + newBool.Elem().Set(reflect.ValueOf(randBool)) + args = append(args, newBool.Elem()) + default: + fmt.Println(v.String()) + } + } + 
fn.Call(args) +} +func (f *F) Helper() {} +func (c *F) Log(args ...any) { + fmt.Println(args...) +} +func (c *F) Logf(format string, args ...any) { + fmt.Println(format, args) +} +func (c *F) Name() string { return "libFuzzer" } +func (c *F) Setenv(key, value string) {} +func (c *F) Skip(args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (c *F) SkipNow() { + panic("GO-FUZZ-BUILD-PANIC") +} +func (c *F) Skipf(format string, args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (f *F) Skipped() bool { return false } + +func (f *F) TempDir() string { + dir, err := os.MkdirTemp("", "fuzzdir-") + if err != nil { + panic(err) + } + f.T.TempDirs = append(f.T.TempDirs, dir) + + return dir +} diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go new file mode 100644 index 0000000000..885fdb3be6 --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go @@ -0,0 +1,129 @@ +package testing + +import ( + "fmt" + "os" + "strings" + "time" +) + +// T can be used to terminate the current fuzz iteration +// without terminating the whole fuzz run. To do so, simply +// panic with the text "GO-FUZZ-BUILD-PANIC" and the fuzzer +// will recover. +type T struct { + TempDirs []string +} + +func NewT() *T { + tempDirs := make([]string, 0) + return &T{TempDirs: tempDirs} +} + +func unsupportedApi(name string) string { + plsOpenIss := "Please open an issue https://github.com/AdamKorcz/go-118-fuzz-build if you need this feature." + var b strings.Builder + b.WriteString(fmt.Sprintf("%s is not supported when fuzzing in libFuzzer mode\n.", name)) + b.WriteString(plsOpenIss) + return b.String() +} + +func (t *T) Cleanup(f func()) { + f() +} + +func (t *T) Deadline() (deadline time.Time, ok bool) { + panic(unsupportedApi("t.Deadline()")) +} + +func (t *T) Error(args ...any) { + fmt.Println(args...) + panic("error") +} + +func (t *T) Errorf(format string, args ...any) { + fmt.Printf(format+"\n", args...) + panic("errorf") +} + +func (t *T) Fail() { + panic("Called T.Fail()") +} + +func (t *T) FailNow() { + panic("Called T.Fail()") + panic(unsupportedApi("t.FailNow()")) +} + +func (t *T) Failed() bool { + panic(unsupportedApi("t.Failed()")) +} + +func (t *T) Fatal(args ...any) { + fmt.Println(args...) + panic("fatal") +} +func (t *T) Fatalf(format string, args ...any) { + fmt.Printf(format+"\n", args...) + panic("fatal") +} +func (t *T) Helper() { + // We can't support it, but it also just impacts how failures are reported, so we can ignore it +} +func (t *T) Log(args ...any) { + fmt.Println(args...) +} + +func (t *T) Logf(format string, args ...any) { + fmt.Println(format) + fmt.Println(args...) +} + +func (t *T) Name() string { + return "libFuzzer" +} + +func (t *T) Parallel() { + panic(unsupportedApi("t.Parallel()")) +} +func (t *T) Run(name string, f func(t *T)) bool { + panic(unsupportedApi("t.Run()")) +} + +func (t *T) Setenv(key, value string) { + +} + +func (t *T) Skip(args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (t *T) SkipNow() { + panic("GO-FUZZ-BUILD-PANIC") +} + +// Is not really supported. We just skip instead +// of printing any message. A log message can be +// added if need be. 
+func (t *T) Skipf(format string, args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (t *T) Skipped() bool { + panic(unsupportedApi("t.Skipped()")) +} +func (t *T) TempDir() string { + dir, err := os.MkdirTemp("", "fuzzdir-") + if err != nil { + panic(err) + } + t.TempDirs = append(t.TempDirs, dir) + + return dir +} + +func (t *T) CleanupTempDirs() { + if len(t.TempDirs) > 0 { + for _, tempDir := range t.TempDirs { + os.RemoveAll(tempDir) + } + } +} diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go new file mode 100644 index 0000000000..310c22893c --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go @@ -0,0 +1,42 @@ +package testing + +import ( + "testing" +) + +func AllocsPerRun(runs int, f func()) (avg float64) { + panic(unsupportedApi("testing.AllocsPerRun")) +} +func CoverMode() string { + panic(unsupportedApi("testing.CoverMode")) +} +func Coverage() float64 { + panic(unsupportedApi("testing.Coverage")) +} +func Init() { + panic(unsupportedApi("testing.Init")) + +} +func RegisterCover(c testing.Cover) { + panic(unsupportedApi("testing.RegisterCover")) +} +func RunExamples(matchString func(pat, str string) (bool, error), examples []testing.InternalExample) (ok bool) { + panic(unsupportedApi("testing.RunExamples")) +} + +func RunTests(matchString func(pat, str string) (bool, error), tests []testing.InternalTest) (ok bool) { + panic(unsupportedApi("testing.RunTests")) +} + +func Short() bool { + return false +} + +func Verbose() bool { + panic(unsupportedApi("testing.Verbose")) +} + +type M struct {} +func (m *M) Run() (code int) { + panic("testing.M is not support in libFuzzer Mode") +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go new file mode 100644 index 0000000000..7ac377ae46 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/bind_filter.go @@ -0,0 +1,308 @@ +//go:build windows +// +build windows + +package bindfilter + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./bind_filter.go +//sys bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter? +//sys bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) = bindfltapi.BfRemoveMapping? +//sys bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) = bindfltapi.BfGetMappings? + +// BfSetupFilter flags. See: +// https://github.com/microsoft/BuildXL/blob/a6dce509f0d4f774255e5fbfb75fa6d5290ed163/Public/Src/Utilities/Native/Processes/Windows/NativeContainerUtilities.cs#L193-L240 +// +//nolint:revive // var-naming: ALL_CAPS +const ( + BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001 + // Tells bindflt to fail mapping with STATUS_INVALID_PARAMETER if a mapping produces + // multiple targets. 
+ BINDFLT_FLAG_NO_MULTIPLE_TARGETS uint32 = 0x00000040 +) + +//nolint:revive // var-naming: ALL_CAPS +const ( + BINDFLT_GET_MAPPINGS_FLAG_VOLUME uint32 = 0x00000001 + BINDFLT_GET_MAPPINGS_FLAG_SILO uint32 = 0x00000002 + BINDFLT_GET_MAPPINGS_FLAG_USER uint32 = 0x00000004 +) + +// ApplyFileBinding creates a global mount of the source in root, with an optional +// read only flag. +// The bind filter allows us to create mounts of directories and volumes. By default it allows +// us to mount multiple sources inside a single root, acting as an overlay. Files from the +// second source will superscede the first source that was mounted. +// This function disables this behavior and sets the BINDFLT_FLAG_NO_MULTIPLE_TARGETS flag +// on the mount. +func ApplyFileBinding(root, source string, readOnly bool) error { + // The parent directory needs to exist for the bind to work. MkdirAll stats and + // returns nil if the directory exists internally so we should be fine to mkdirall + // every time. + if err := os.MkdirAll(filepath.Dir(root), 0); err != nil { + return err + } + + if strings.Contains(source, "Volume{") && !strings.HasSuffix(source, "\\") { + // Add trailing slash to volumes, otherwise we get an error when binding it to + // a folder. + source = source + "\\" + } + + flags := BINDFLT_FLAG_NO_MULTIPLE_TARGETS + if readOnly { + flags |= BINDFLT_FLAG_READ_ONLY_MAPPING + } + + // Set the job handle to 0 to create a global mount. + if err := bfSetupFilter( + 0, + flags, + root, + source, + nil, + 0, + ); err != nil { + return fmt.Errorf("failed to bind target %q to root %q: %w", source, root, err) + } + return nil +} + +// RemoveFileBinding removes a mount from the root path. +func RemoveFileBinding(root string) error { + if err := bfRemoveMapping(0, root); err != nil { + return fmt.Errorf("removing file binding: %w", err) + } + return nil +} + +// GetBindMappings returns a list of bind mappings that have their root on a +// particular volume. The volumePath parameter can be any path that exists on +// a volume. For example, if a number of mappings are created in C:\ProgramData\test, +// to get a list of those mappings, the volumePath parameter would have to be set to +// C:\ or the VOLUME_NAME_GUID notation of C:\ (\\?\Volume{GUID}\), or any child +// path that exists. +func GetBindMappings(volumePath string) ([]BindMapping, error) { + rootPtr, err := windows.UTF16PtrFromString(volumePath) + if err != nil { + return nil, err + } + + flags := BINDFLT_GET_MAPPINGS_FLAG_VOLUME + // allocate a large buffer for results + var outBuffSize uint32 = 256 * 1024 + buf := make([]byte, outBuffSize) + + if err := bfGetMappings(flags, 0, rootPtr, nil, &outBuffSize, &buf[0]); err != nil { + return nil, err + } + + if outBuffSize < 12 { + return nil, fmt.Errorf("invalid buffer returned") + } + + result := buf[:outBuffSize] + + // The first 12 bytes are the three uint32 fields in getMappingsResponseHeader{} + headerBuffer := result[:12] + // The alternative to using unsafe and casting it to the above defined structures, is to manually + // parse the fields. Not too terrible, but not sure it'd worth the trouble. 
+ header := *(*getMappingsResponseHeader)(unsafe.Pointer(&headerBuffer[0])) + + if header.MappingCount == 0 { + // no mappings + return []BindMapping{}, nil + } + + mappingsBuffer := result[12 : int(unsafe.Sizeof(mappingEntry{}))*int(header.MappingCount)] + // Get a pointer to the first mapping in the slice + mappingsPointer := (*mappingEntry)(unsafe.Pointer(&mappingsBuffer[0])) + // Get slice of mappings + mappings := unsafe.Slice(mappingsPointer, header.MappingCount) + + mappingEntries := make([]BindMapping, header.MappingCount) + for i := 0; i < int(header.MappingCount); i++ { + bindMapping, err := getBindMappingFromBuffer(result, mappings[i]) + if err != nil { + return nil, fmt.Errorf("fetching bind mappings: %w", err) + } + mappingEntries[i] = bindMapping + } + + return mappingEntries, nil +} + +// mappingEntry holds information about where in the response buffer we can +// find information about the virtual root (the mount point) and the targets (sources) +// that get mounted, as well as the flags used to bind the targets to the virtual root. +type mappingEntry struct { + VirtRootLength uint32 + VirtRootOffset uint32 + Flags uint32 + NumberOfTargets uint32 + TargetEntriesOffset uint32 +} + +type mappingTargetEntry struct { + TargetRootLength uint32 + TargetRootOffset uint32 +} + +// getMappingsResponseHeader represents the first 12 bytes of the BfGetMappings() response. +// It gives us the size of the buffer, the status of the call and the number of mappings. +// A response +type getMappingsResponseHeader struct { + Size uint32 + Status uint32 + MappingCount uint32 +} + +type BindMapping struct { + MountPoint string + Flags uint32 + Targets []string +} + +func decodeEntry(buffer []byte) (string, error) { + name := make([]uint16, len(buffer)/2) + err := binary.Read(bytes.NewReader(buffer), binary.LittleEndian, &name) + if err != nil { + return "", fmt.Errorf("decoding name: %w", err) + } + return windows.UTF16ToString(name), nil +} + +func getTargetsFromBuffer(buffer []byte, offset, count int) ([]string, error) { + if len(buffer) < offset+count*6 { + return nil, fmt.Errorf("invalid buffer") + } + + targets := make([]string, count) + for i := 0; i < count; i++ { + entryBuf := buffer[offset+i*8 : offset+i*8+8] + tgt := *(*mappingTargetEntry)(unsafe.Pointer(&entryBuf[0])) + if len(buffer) < int(tgt.TargetRootOffset)+int(tgt.TargetRootLength) { + return nil, fmt.Errorf("invalid buffer") + } + decoded, err := decodeEntry(buffer[tgt.TargetRootOffset : tgt.TargetRootOffset+tgt.TargetRootLength]) + if err != nil { + return nil, fmt.Errorf("decoding name: %w", err) + } + decoded, err = getFinalPath(decoded) + if err != nil { + return nil, fmt.Errorf("fetching final path: %w", err) + } + + targets[i] = decoded + } + return targets, nil +} + +func getFinalPath(pth string) (string, error) { + // BfGetMappings returns VOLUME_NAME_NT paths like \Device\HarddiskVolume2\ProgramData. + // These can be accessed by prepending \\.\GLOBALROOT to the path. We use this to get the + // DOS paths for these files. 
+ if strings.HasPrefix(pth, `\Device`) { + pth = `\\.\GLOBALROOT` + pth + } + + han, err := openPath(pth) + if err != nil { + return "", fmt.Errorf("fetching file handle: %w", err) + } + defer func() { + _ = windows.CloseHandle(han) + }() + + buf := make([]uint16, 100) + var flags uint32 = 0x0 + for { + n, err := windows.GetFinalPathNameByHandle(han, &buf[0], uint32(len(buf)), flags) + if err != nil { + // if we mounted a volume that does not also have a drive letter assigned, attempting to + // fetch the VOLUME_NAME_DOS will fail with os.ErrNotExist. Attempt to get the VOLUME_NAME_GUID. + if errors.Is(err, os.ErrNotExist) && flags != 0x1 { + flags = 0x1 + continue + } + return "", fmt.Errorf("getting final path name: %w", err) + } + if n < uint32(len(buf)) { + break + } + buf = make([]uint16, n) + } + finalPath := syscall.UTF16ToString(buf) + // We got VOLUME_NAME_DOS, we need to strip away some leading slashes. + // Leave unchanged if we ended up requesting VOLUME_NAME_GUID + if len(finalPath) > 4 && finalPath[:4] == `\\?\` && flags == 0x0 { + finalPath = finalPath[4:] + if len(finalPath) > 3 && finalPath[:3] == `UNC` { + // return path like \\server\share\... + finalPath = `\` + finalPath[3:] + } + } + + return finalPath, nil +} + +func getBindMappingFromBuffer(buffer []byte, entry mappingEntry) (BindMapping, error) { + if len(buffer) < int(entry.VirtRootOffset)+int(entry.VirtRootLength) { + return BindMapping{}, fmt.Errorf("invalid buffer") + } + + src, err := decodeEntry(buffer[entry.VirtRootOffset : entry.VirtRootOffset+entry.VirtRootLength]) + if err != nil { + return BindMapping{}, fmt.Errorf("decoding entry: %w", err) + } + targets, err := getTargetsFromBuffer(buffer, int(entry.TargetEntriesOffset), int(entry.NumberOfTargets)) + if err != nil { + return BindMapping{}, fmt.Errorf("fetching targets: %w", err) + } + + src, err = getFinalPath(src) + if err != nil { + return BindMapping{}, fmt.Errorf("fetching final path: %w", err) + } + + return BindMapping{ + Flags: entry.Flags, + Targets: targets, + MountPoint: src, + }, nil +} + +func openPath(path string) (windows.Handle, error) { + u16, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + h, err := windows.CreateFile( + u16, + 0, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, + windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS, // Needed to open a directory handle. + 0) + if err != nil { + return 0, &os.PathError{ + Op: "CreateFile", + Path: path, + Err: err, + } + } + return h, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go new file mode 100644 index 0000000000..45c45c96e4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/bindfilter/zsyscall_windows.go @@ -0,0 +1,116 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package bindfilter + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") + + procBfGetMappings = modbindfltapi.NewProc("BfGetMappings") + procBfRemoveMapping = modbindfltapi.NewProc("BfRemoveMapping") + procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter") +) + +func bfGetMappings(flags uint32, jobHandle windows.Handle, virtRootPath *uint16, sid *windows.SID, bufferSize *uint32, outBuffer *byte) (hr error) { + hr = procBfGetMappings.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfGetMappings.Addr(), 6, uintptr(flags), uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(bufferSize)), uintptr(unsafe.Pointer(outBuffer))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func bfRemoveMapping(jobHandle windows.Handle, virtRootPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(virtRootPath) + if hr != nil { + return + } + return _bfRemoveMapping(jobHandle, _p0) +} + +func _bfRemoveMapping(jobHandle windows.Handle, virtRootPath *uint16) (hr error) { + hr = procBfRemoveMapping.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procBfRemoveMapping.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(virtRootPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath string, virtTargetPath string, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(virtRootPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(virtTargetPath) + if hr != nil { + return + } + return _bfSetupFilter(jobHandle, flags, _p0, _p1, virtExceptions, virtExceptionPathCount) +} + +func _bfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + hr = procBfSetupFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/containerd/containerd/.cirrus.yml b/vendor/github.com/containerd/containerd/.cirrus.yml new file mode 100644 index 0000000000..db7d2dd916 --- /dev/null +++ b/vendor/github.com/containerd/containerd/.cirrus.yml @@ -0,0 +1,82 @@ +# Cirrus CI gives open-source projects free 16.0 CPUs, +# we use 4 CPUs x 3 tasks = 12 CPUs. +# https://cirrus-ci.org/faq/#are-there-any-limits +# +# Undocumented constraints; +# - The maximum memory limit is 4G times the number of CPUs. +# - The number of CPUs should be multiple of 2. 
+ +task: + name: Vagrant + + compute_engine_instance: + image_project: cirrus-images + image: family/docker-kvm + platform: linux + nested_virtualization: true + cpu: 4 + memory: 16G + + env: + GOTEST: gotestsum -- + # By default, Cirrus CI doesn't have HOME defined + HOME: /root + matrix: + BOX: fedora/37-cloud-base + # v7.0.0 does not boot. v6.0.0 was not released. + BOX: rockylinux/8@5.0.0 + install_libvirt_vagrant_script: | + # if another process is keeping a lock, wait for 60 seconds for it to release the lock. + apt-get -o DPkg::Lock::Timeout=60 update + apt-get -o DPkg::Lock::Timeout=60 install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + systemctl enable --now libvirtd + + vagrant_cache: + folder: /root/.vagrant.d + fingerprint_script: uname --kernel-release --kernel-version && cat Vagrantfile + + vagrant_up_script: | + vagrant up --no-tty + + integration_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-integration + + cri_integration_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri-integration + + cri_test_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri + +task: + name: CGroupsV2 - rootless CRI test + + env: + HOME: /root + + compute_engine_instance: + image_project: cirrus-images + image: family/docker-kvm + platform: linux + nested_virtualization: true + cpu: 4 + memory: 16G + + install_libvirt_vagrant_script: | + # if another process is keeping a lock, wait for 60 seconds for it to release the lock. + apt-get -o DPkg::Lock::Timeout=60 update + apt-get -o DPkg::Lock::Timeout=60 install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + systemctl enable --now libvirtd + + vagrant_cache: + folder: /root/.vagrant.d + fingerprint_script: uname -a; cat Vagrantfile + + vagrant_up_script: | + vagrant up --provision-with=install-rootless-podman --no-tty + + podman_build_script: | + # Execute rootless podman to create the UserNS env + vagrant ssh -- podman build --target cri-in-userns -t cri-in-userns -f /vagrant/contrib/Dockerfile.test /vagrant + + test_script: | + vagrant ssh -- podman run --rm --privileged cri-in-userns diff --git a/vendor/github.com/containerd/containerd/.golangci.yml b/vendor/github.com/containerd/containerd/.golangci.yml index e162f0aef5..e52c4ffb94 100644 --- a/vendor/github.com/containerd/containerd/.golangci.yml +++ b/vendor/github.com/containerd/containerd/.golangci.yml @@ -25,6 +25,10 @@ issues: # Only using / doesn't work due to https://github.com/golangci/golangci-lint/issues/1398. 
exclude-rules: + - path: 'cmd[\\/]containerd[\\/]builtins[\\/]' + text: "blank-imports:" + - path: 'contrib[\\/]fuzz[\\/]' + text: "exported: func name will be used as fuzz.Fuzz" - path: 'archive[\\/]tarheader[\\/]' # conversion is necessary on Linux, unnecessary on macOS text: "unnecessary conversion" diff --git a/vendor/github.com/containerd/containerd/.mailmap b/vendor/github.com/containerd/containerd/.mailmap index 3988d4797a..34281d429e 100644 --- a/vendor/github.com/containerd/containerd/.mailmap +++ b/vendor/github.com/containerd/containerd/.mailmap @@ -1,10 +1,13 @@ Abhinandan Prativadi Abhinandan Prativadi Ace-Tang +Adam Korcz +Aditi Sharma Akihiro Suda Akihiro Suda Allen Sun Alexander Morozov +Antonio Ojea Antonio Ojea Amit Krishnan Andrei Vagin @@ -26,9 +29,12 @@ Daniel Dao Derek McGowan Edgar Lee Eric Ernst +Eric Lin Eric Ren Eric Ren Eric Ren +Fabian Hoffmann +Fabian Hoffmann <35104465+FabHof@users.noreply.github.com> Fabiano Fidêncio Fahed Dorgaa Frank Yang @@ -37,6 +43,7 @@ Fupan Li Fupan Li Furkan Türkal Georgia Panoutsakopoulou +guodong Guangming Wang Haiyan Meng haoyun @@ -51,12 +58,16 @@ Jian Liao Jian Liao Ji'an Liu Jie Zhang +Jiongchi Yu John Howard John Howard John Howard John Howard +Junyu Liu +LongtaoZhang Lorenz Brun Luc Perkins +James Sturtevant Jiajun Jiang Julien Balestra Jun Lin Chen <1913688+mc256@users.noreply.github.com> @@ -64,15 +75,19 @@ Justin Cormack Justin Terry Justin Terry Kante +Kazuyoshi Kato +Kazuyoshi Kato Kenfe-Mickaël Laventure Kevin Kern Kevin Parsons Kevin Xu +Kirtana Ashok Kitt Hsu Kohei Tokunaga Krasi Georgiev Lantao Liu Lantao Liu +lengrongfu <1275177125@qq.com> Li Yuxuan Lifubang Lu Jingxiao @@ -84,6 +99,7 @@ Mario Hros Mario Macias Mark Gordon Marvin Giessing +Mathis Michel Michael Crosby Michael Katsoulis Mike Brown @@ -96,6 +112,7 @@ Nishchay Kumar Oliver Stenbom Phil Estes Phil Estes +Qian Zhang Reid Li Robin Winkelewski Ross Boucher @@ -121,20 +138,24 @@ Su Xiaolin Takumasa Sakao Ted Yu Tõnis Tiigi +Tony Fang +Tony Fang Wade Lee Wade Lee Wade Lee <21621232@zju.edu.cn> -Wang Bing wanglei wanglei wangzhan Wei Fu Wei Fu +wen chen Xiaodong Zhang Xuean Yan Yang Yang Yue Zhang Yuxing Liu +Zechun Chen +zhang he Zhang Wei zhangyadong Zhenguang Zhu diff --git a/vendor/github.com/containerd/containerd/ADOPTERS.md b/vendor/github.com/containerd/containerd/ADOPTERS.md index bbf99e7dd5..c5e60df7b4 100644 --- a/vendor/github.com/containerd/containerd/ADOPTERS.md +++ b/vendor/github.com/containerd/containerd/ADOPTERS.md @@ -12,6 +12,8 @@ including the Balena project listed below. **_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release. +**_[Google Container-Optimized OS (COS)](https://cloud.google.com/container-optimized-os/docs)_** - Container-Optimized OS is a Linux Operating System from Google that is optimized for running containers. COS has used containerd as container runtime when containerd was part of Docker's core container runtime. + **_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. 
[More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd) **_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services. @@ -36,7 +38,7 @@ including the Balena project listed below. **_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command. -**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration) +**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 and greater, and Windows nodes running 1.20 and greater. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration) **_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd). @@ -52,6 +54,8 @@ including the Balena project listed below. **_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation. +**_Deckhouse_** - [Deckhouse Kubernetes Platform](https://deckhouse.io/) from Flant allows you to manage Kubernetes clusters anywhere in a fully automatic and uniform fashion. It uses containerd as the default CRI runtime. + **_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants: - Michael Crosby's [boss](https://github.com/crosbymichael/boss) project, - Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project, diff --git a/vendor/github.com/containerd/containerd/BUILDING.md b/vendor/github.com/containerd/containerd/BUILDING.md index 286164dc26..ac9a2716fa 100644 --- a/vendor/github.com/containerd/containerd/BUILDING.md +++ b/vendor/github.com/containerd/containerd/BUILDING.md @@ -18,6 +18,8 @@ To build the `containerd` daemon, and the `ctr` simple test client, the followin * Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases)) * Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency. 
+> *Note*: On macOS, you need a third party runtime to run containers on containerd + ## Build the development environment First you need to setup your Go development environment. You can follow this @@ -37,12 +39,16 @@ wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/pr sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local ``` -`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you -need to satisfy these dependencies in your system: +To enable optional [Btrfs](https://en.wikipedia.org/wiki/Btrfs) snapshotter, you should have the headers from the Linux kernel 4.12 or later. +The dependency on the kernel headers only affects users building containerd from source. +Users on older kernels may opt to not compile the btrfs support (see `BUILDTAGS=no_btrfs` below), +or to provide headers from a newer kernel. -* CentOS/Fedora: `yum install btrfs-progs-devel` -* Debian/Ubuntu: `apt-get install btrfs-progs libbtrfs-dev` - * Debian(before Buster)/Ubuntu(before 19.10): `apt-get install btrfs-tools` +> **Note** +> The dependency on the Linux kernel headers 4.12 was introduced in containerd 1.7.0-beta.4. +> +> containerd 1.6 has different set of dependencies for enabling btrfs. +> containerd 1.6 users should refer to https://github.com/containerd/containerd/blob/release/1.6/BUILDING.md#build-the-development-environment At this point you are ready to build `containerd` yourself! @@ -54,6 +60,8 @@ the system, sometimes it is necessary to build runc directly when working with container runtime development. Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the best results. +> *Note*: Runc only supports Linux + ## Build containerd `containerd` uses `make` to create a repeatable build flow. It means that you @@ -117,6 +125,8 @@ Changes to these files should become a single commit for a PR which relies on ve Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of `runc` that is used by containerd. +> *Note*: On macOS, the containerd daemon can be built and run natively. However, as stated above, runc only supports linux. + ### Static binaries You can build static binaries by providing a few variables to `make`: @@ -141,9 +151,6 @@ You can build an image from this `Dockerfile`: ```dockerfile FROM golang - -RUN apt-get update && \ - apt-get install -y libbtrfs-dev ``` Let's suppose that you built an image called `containerd/build`. From the @@ -180,7 +187,7 @@ We can build an image from this `Dockerfile`: FROM golang RUN apt-get update && \ - apt-get install -y libbtrfs-dev libseccomp-dev + apt-get install -y libseccomp-dev ``` In our Docker container we will build `runc` build, which includes @@ -236,6 +243,7 @@ During the automated CI the unit tests and integration tests are run as part of - `make test`: run all non-integration tests that do not require `root` privileges - `make root-test`: run all non-integration tests which require `root` - `make integration`: run all tests, including integration tests and those which require `root`. `TESTFLAGS_PARALLEL` can be used to control parallelism. For example, `TESTFLAGS_PARALLEL=1 make integration` will lead a non-parallel execution. The default value of `TESTFLAGS_PARALLEL` is **8**. 
+ - `make cri-integration`: [CRI Integration Tests](https://github.com/containerd/containerd/blob/main/docs/cri/testing.md#cri-integration-test) run cri integration tests To execute a specific test or set of tests you can use the `go test` capabilities without using the `Makefile` targets. The following examples show how to specify a test @@ -271,7 +279,7 @@ In addition to `go test`-based testing executed via the `Makefile` targets, the With this tool you can stress a running containerd daemon for a specified period of time, selecting a concurrency level to generate stress against the daemon. The following command is an example of having five workers running for two hours against a default containerd gRPC socket address: ```sh -containerd-stress -c 5 -t 120 +containerd-stress -c 5 -d 120m ``` For more information on this tool's options please run `containerd-stress --help`. diff --git a/vendor/github.com/containerd/containerd/Makefile b/vendor/github.com/containerd/containerd/Makefile index f1b28ceb9c..02a8aa2027 100644 --- a/vendor/github.com/containerd/containerd/Makefile +++ b/vendor/github.com/containerd/containerd/Makefile @@ -75,6 +75,7 @@ WHALE = "🇩" ONI = "👹" RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} +STATICRELEASE=containerd-static-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH} @@ -88,6 +89,7 @@ ifdef BUILDTAGS GO_BUILDTAGS = ${BUILDTAGS} endif GO_BUILDTAGS ?= +GO_BUILDTAGS += urfave_cli_no_docs GO_BUILDTAGS += ${DEBUG_TAGS} ifneq ($(STATIC),) GO_BUILDTAGS += osusergo netgo static_build @@ -122,7 +124,7 @@ ifdef SKIPTESTS endif #Replaces ":" (*nix), ";" (windows) with newline for easy parsing -GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n") +GOPATHS=$(shell go env GOPATH | tr ":" "\n" | tr ";" "\n") TESTFLAGS_RACE= GO_BUILD_FLAGS= @@ -147,7 +149,7 @@ GOTEST ?= $(GO) test OUTPUTDIR = $(join $(ROOTDIR), _output) CRIDIR=$(OUTPUTDIR)/cri -.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test +.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install uninstall vendor release static-release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test .DEFAULT: default # Forcibly set the default goal to all, in case an include above brought in a rule definition. @@ -159,7 +161,7 @@ check: proto-fmt ## run all linters @echo "$(WHALE) $@" GOGC=75 golangci-lint run -ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI +ci: check binaries check-protos coverage coverage-integration ## to be used by the CI AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ @@ -168,7 +170,7 @@ generate: protos @echo "$(WHALE) $@" @PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES} -protos: bin/protoc-gen-gogoctrd ## generate protobuf +protos: bin/protoc-gen-go-fieldpath @echo "$(WHALE) $@" @find . 
-path ./vendor -prune -false -o -name '*.pb.go' | xargs rm $(eval TMPDIR := $(shell mktemp -d)) @@ -177,6 +179,7 @@ protos: bin/protoc-gen-gogoctrd ## generate protobuf @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES}) @mv ${TMPDIR}/vendor ${ROOTDIR} @rm -rf ${TMPDIR} + go-fix-acronym -w -a '(Id|Io|Uuid|Os)$$' $(shell find api/ runtime/ -name '*.pb.go') check-protos: protos ## check if protobufs needs to be generated again @echo "$(WHALE) $@" @@ -194,8 +197,6 @@ proto-fmt: ## check format of proto files @echo "$(WHALE) $@" @test -z "$$(find . -path ./vendor -prune -o -path ./protobuf/google/rpc -prune -o -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ (echo "$(ONI) please indent proto files with tabs only" && false) - @test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ - (echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false) build: ## build the go packages @echo "$(WHALE) $@" @@ -218,9 +219,9 @@ bin/cri-integration.test: @echo "$(WHALE) $@" @$(GO) test -c ./integration -o bin/cri-integration.test -cri-integration: binaries bin/cri-integration.test ## run cri integration tests +cri-integration: binaries bin/cri-integration.test ## run cri integration tests (example: FOCUS=TestContainerListStats make cri-integration) @echo "$(WHALE) $@" - @bash -x ./script/test/cri-integration.sh + @bash ./script/test/cri-integration.sh @rm -rf bin/cri-integration.test # build runc shimv2 with failpoint control, only used by integration test @@ -241,13 +242,18 @@ FORCE: define BUILD_BINARY @echo "$(WHALE) $@" -@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} ${GO_TAGS} ./$< +$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} ${GO_TAGS} ./$< endef # Build a binary from a cmd. 
bin/%: cmd/% FORCE $(call BUILD_BINARY) +# gen-manpages must not have the urfave_cli_no_docs build-tag set +bin/gen-manpages: cmd/gen-manpages FORCE + @echo "$(WHALE) $@" + $(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} $(subst urfave_cli_no_docs,,${GO_TAGS}) ./cmd/gen-manpages + bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220 @echo "$(WHALE) $@" @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim @@ -272,13 +278,13 @@ mandir: # Kept for backwards compatibility genman: man/containerd.8 man/ctr.8 -man/containerd.8: FORCE +man/containerd.8: bin/gen-manpages FORCE @echo "$(WHALE) $@" - $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) + $< $(@F) $(@D) -man/ctr.8: FORCE +man/ctr.8: bin/gen-manpages FORCE @echo "$(WHALE) $@" - $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) + $< $(@F) $(@D) man/%: docs/man/%.md FORCE @echo "$(WHALE) $@" @@ -294,18 +300,40 @@ install-man: man $(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage)))) +define pack_release + @rm -rf releases/$(1) releases/$(1).tar.gz + @$(INSTALL) -d releases/$(1)/bin + @$(INSTALL) $(BINARIES) releases/$(1)/bin + @tar -czf releases/$(1).tar.gz -C releases/$(1) bin + @rm -rf releases/$(1) +endef + + releases/$(RELEASE).tar.gz: $(BINARIES) @echo "$(WHALE) $@" - @rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz - @$(INSTALL) -d releases/$(RELEASE)/bin - @$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin - @tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin - @rm -rf releases/$(RELEASE) + $(call pack_release,$(RELEASE)) release: releases/$(RELEASE).tar.gz @echo "$(WHALE) $@" @cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum +releases/$(STATICRELEASE).tar.gz: +ifeq ($(GOOS),linux) + @make STATIC=1 $(BINARIES) + @echo "$(WHALE) $@" + $(call pack_release,$(STATICRELEASE)) +else + @echo "Skipping $(STATICRELEASE) for $(GOOS)" +endif + +static-release: releases/$(STATICRELEASE).tar.gz +ifeq ($(GOOS),linux) + @echo "$(WHALE) $@" + @cd releases && sha256sum $(STATICRELEASE).tar.gz >$(STATICRELEASE).tar.gz.sha256sum +else + @echo "Skipping releasing $(STATICRELEASE) for $(GOOS)" +endif + # install of cri deps into release output directory ifeq ($(GOOS),windows) install-cri-deps: $(BINARIES) diff --git a/vendor/github.com/containerd/containerd/Protobuild.toml b/vendor/github.com/containerd/containerd/Protobuild.toml index ccc4e79cf0..09a4c97047 100644 --- a/vendor/github.com/containerd/containerd/Protobuild.toml +++ b/vendor/github.com/containerd/containerd/Protobuild.toml @@ -1,6 +1,5 @@ -version = "unstable" -generator = "gogoctrd" -plugins = ["grpc", "fieldpath"] +version = "2" +generators = ["go"] # Control protoc include paths. Below are usually some good defaults, but feel # free to try it without them if it works for your project. @@ -9,32 +8,25 @@ plugins = ["grpc", "fieldpath"] # treat the root of the project as an include, but this may not be necessary. before = ["./protobuf"] - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. 
- packages = ["github.com/gogo/protobuf", "github.com/gogo/googleapis"] - # Paths that will be added untouched to the end of the includes. We use # `/usr/local/include` to pickup the common install location of protobuf. # This is the default. after = ["/usr/local/include", "/usr/include"] -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. -[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" - "google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc" - [[overrides]] # enable ttrpc and disable fieldpath and grpc for the shim -prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"] -plugins = ["ttrpc"] +prefixes = [ + "github.com/containerd/containerd/runtime/v1/shim/v1", + "github.com/containerd/containerd/api/runtime/task/v2", + "github.com/containerd/containerd/api/runtime/sandbox/v1", +] +generators = ["go", "go-ttrpc"] + +[[overrides]] +prefixes = [ + "github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2", +] +generators = ["go", "go-grpc"] # Lock down runc config [[descriptors]] @@ -42,7 +34,6 @@ prefix = "github.com/containerd/containerd/runtime/linux/runctypes" target = "runtime/linux/runctypes/next.pb.txt" ignore_files = [ "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" ] [[descriptors]] @@ -50,5 +41,4 @@ prefix = "github.com/containerd/containerd/runtime/v2/runc/options" target = "runtime/v2/runc/options/next.pb.txt" ignore_files = [ "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" ] diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index f876079abb..25bcaeebea 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -7,25 +7,38 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271) -containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. +containerd is an industry-standard container runtime with an emphasis on simplicity, robustness, and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. -containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/selected=containerd) status. 
+containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/?selected=containerd) status. containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users. -![architecture](design/architecture.png) +![architecture](docs/historical/design/architecture.png) -## Now Recruiting +## Announcements + +### Hello Kubernetes v1.24! +The containerd project would like to announce containerd [v1.6.4](https://github.com/containerd/containerd/releases/tag/v1.6.4). While other prior releases are supported, this latest release and the containerd [v1.5.11](https://github.com/containerd/containerd/releases/tag/v1.5.11) release are recommended for Kubernetes v1.24. + +We felt it important to announce this, particularly in view of [the dockershim removal from this release of Kubernetes](https://kubernetes.io/blog/2022/05/03/dockershim-historical-context/). + +It should be noted here that moving to CRI integrations has been in the plan for many years. `containerd` began as part of `Docker` and was donated to `CNCF`. `containerd` remains in use today by Docker/moby/buildkit etc., and has many other [adopters](https://github.com/containerd/containerd/blob/main/ADOPTERS.md). `containerd` has a namespace that isolates use of `containerd` from various clients/adopters. The Kubernetes namespace is appropriately named `k8s.io`. The CRI API and `containerd` CRI plugin project has, from the start, been an effort to reduce the impact surface for Kubernetes container runtime integration. If you can't tell, we are excited to see this come to fruition. + +If you have any concerns or questions, we will be here to answer them in [issues, discussions, and/or on slack](#communication). Below you will find information/detail about our [CRI Integration](#cri) implementation. + +For containerd users already on v1.6.0-v1.6.3, there are known issues addressed by [v1.6.4](https://github.com/containerd/containerd/releases/tag/v1.6.4). The issues are primarily related to [CNI setup](https://github.com/kubernetes/website/blob/dev-1.24/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors.md) + +### Now Recruiting We are a large inclusive OSS project that is welcoming help of any kind shape or form: * Documentation help is needed to make the product easier to consume and extend. -* We need OSS community outreach / organizing help to get the word out; manage -and create messaging and educational content; and to help with social media, community forums/groups, and google groups. +* We need OSS community outreach/organizing help to get the word out; manage +and create messaging and educational content; and help with social media, community forums/groups, and google groups. * We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team. -* New sub-projects are being created, core and non-core that could use additional development help. +* New subprojects are being created, core and non-core that could use additional development help. * Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving. - - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire. 
- - If you are interested in starting with a smaller / beginner level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner) + - If the issue has not already been assigned to someone or has not made recent progress, and you are interested, please inquire. + - If you are interested in starting with a smaller/beginner-level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner) ## Getting Started @@ -102,7 +115,7 @@ func main() { ### Namespaces -Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images. +Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content while maintaining separation with containers and images. To set a namespace for requests to the API: @@ -132,7 +145,7 @@ err := client.Push(context, "docker.io/library/redis:latest", image.Target()) ### Containers -In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container. +In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container. ```go redis, err := client.NewContainer(context, "redis-master") @@ -141,7 +154,7 @@ defer redis.Delete(context) ### OCI Runtime Specification -containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters. +containerd fully supports the OCI runtime specification for running containers. We have built-in functions to help you generate runtime specifications based on images as well as custom parameters. You can specify options when creating a container about how to modify the specification. @@ -151,7 +164,7 @@ redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpe ### Root Filesystems -containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built in support for overlayfs and btrfs. +containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built-in support for overlayfs and btrfs. ```go // pull an image and unpack it into the configured snapshotter @@ -271,7 +284,7 @@ loaded for the user's shell environment. ### CRI -`cri` is a [containerd](https://containerd.io/) plugin implementation of the Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1alpha2/api.proto). With it, you are able to use containerd as the container runtime for a Kubernetes cluster. +`cri` is a [containerd](https://containerd.io/) plugin implementation of the Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1/api.proto). With it, you are able to use containerd as the container runtime for a Kubernetes cluster. 
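(Not part of the patch: the client calls referenced in the README hunks above — namespace scoping, `client.Pull`, `client.NewContainer`, `containerd.WithNewSpec(oci.WithImageConfig(...))` — compose roughly as follows. This is a minimal sketch assuming the containerd v1 Go client on a local Linux socket; the socket path, namespace, image reference, and container/snapshot IDs are illustrative only.)

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// Connect to containerd over its default Linux socket (path is illustrative).
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Scope all API calls to a namespace, as described in the Namespaces section.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull an image and unpack it into the configured snapshotter.
	image, err := client.Pull(ctx, "docker.io/library/redis:latest", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}

	// Create a container (a metadata object) with a spec generated from the image config.
	redis, err := client.NewContainer(ctx, "redis-master",
		containerd.WithNewSnapshot("redis-rootfs", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer redis.Delete(ctx, containerd.WithSnapshotCleanup)
}
```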
![cri](./docs/cri/cri.png) @@ -296,7 +309,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c #### CRI Guides * [Installing with Ansible and Kubeadm](contrib/ansible/README.md) -* [For Non-Ansible Users, Preforming a Custom Installation Using the Release Tarball and Kubeadm](docs/cri/installation.md) +* [For Non-Ansible Users, Preforming a Custom Installation Using the Release Tarball and Kubeadm](docs/getting-started.md) * [CRI Plugin Testing Guide](./docs/cri/testing.md) * [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md) * [Configuring `cri` Plugins](./docs/cri/config.md) @@ -304,23 +317,23 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c ### Communication -For async communication and long running discussions please use issues and pull requests on the github repo. +For async communication and long-running discussions please use issues and pull requests on the GitHub repo. This will be the best place to discuss design and implementation. -For sync communication catch us in the `#containerd` and `#containerd-dev` slack channels on Cloud Native Computing Foundation's (CNCF) slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF slack.](https://slack.cncf.io) +For sync communication catch us in the `#containerd` and `#containerd-dev` Slack channels on Cloud Native Computing Foundation's (CNCF) Slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF Slack.](https://slack.cncf.io) ### Security audit -A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory. +Security audits for the containerd project are hosted on our website. Please see the [security page at containerd.io](https://containerd.io/security/) for more information. ### Reporting security issues -__If you are reporting a security issue, please reach out discreetly at security@containerd.io__. +Please follow the instructions at [containerd/project](https://github.com/containerd/project/blob/main/SECURITY.md#reporting-a-vulnerability) ## Licenses The containerd codebase is released under the [Apache 2.0 license](LICENSE). -The README.md file, and files in the "docs" folder are licensed under the +The README.md file and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License. You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/containerd/containerd/RELEASES.md b/vendor/github.com/containerd/containerd/RELEASES.md index 24762487c8..26855b256b 100644 --- a/vendor/github.com/containerd/containerd/RELEASES.md +++ b/vendor/github.com/containerd/containerd/RELEASES.md @@ -1,7 +1,7 @@ # Versioning and Release This document details the versioning and release plan for containerd. Stability -is a top goal for this project and we hope that this document and the processes +is a top goal for this project, and we hope that this document and the processes it entails will help to achieve that. It covers the release process, versioning numbering, backporting, API stability and support horizons. @@ -74,14 +74,15 @@ to create the milestone or add an issue or PR to an existing milestone. ### Support Horizon Support horizons will be defined corresponding to a release branch, identified -by `.`. Releases branches will be in one of several states: +by `.`. 
Release branches will be in one of several states: - __*Next*__: The next planned release branch. -- __*Active*__: The release branch is currently supported and accepting patches. +- __*Active*__: The release is a stable branch which is currently supported and accepting patches. - __*Extended*__: The release branch is only accepting security patches. +- __*LTS*__: The release is a long term stable branch which is currently supported and accepting patches. - __*End of Life*__: The release branch is no longer supported and no new patches will be accepted. -Releases will be supported up to one year after a _minor_ release. This means that +Releases will be supported at least one year after a _minor_ release. This means that we will accept bug reports and backports to release branches until the end of life date. If no new _minor_ release has been made, that release will be considered supported until 6 months after the next _minor_ is released or one year, @@ -89,45 +90,71 @@ whichever is longer. Additionally, releases may have an extended security suppor period after the end of the active period to accept security backports. This timeframe will be decided by maintainers before the end of the active status. +Long term stable (_LTS_) releases will be supported for at least three years after +their initial _minor_ release. These branches will accept bug reports and +backports until the end of life date. They may also accept a wider range of +patches than non-_LTS_ releases to support the longer term maintainability of the +branch, including library dependency, toolchain (including Go) and other version updates +which are needed to ensure each release is built with fully supported dependencies and +remains usable by containerd clients. There should be at least a 6-month overlap between +the end of life of an _LTS_ release and the initial release of a new _LTS_ release. +Up to 6 months before the announced end of life of an _LTS_ branch, the branch may +convert to a regular _Active_ release with stricter backport criteria. 
+ The current state is available in the following tables: -| Release | Status | Start | End of Life | -|---------|-------------|------------------|-------------------| -| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | -| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | -| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | -| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | -| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | -| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | -| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | -| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended | August 17, 2020 | March 3, 2022 (Extended) | -| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9) | Active | May 3, 2021 | October 28, 2022 | -| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | Active | February 15, 2022 | max(February 15, 2023 or release of 1.7.0 + 6 months) | -| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD | +| Release | Status | Start | End of Life | +| --------- | ------------- | ------------------ | ------------------- | +| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | +| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | +| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | +| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | +| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | +| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | +| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | +| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | +| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.18) | End of Life | May 3, 2021 | February 28, 2023 | +| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.19) | LTS | February 15, 2022 | max(February 15, 2025 or next LTS + 6 months) | +| [1.7](https://github.com/containerd/containerd/releases/tag/v1.7.0) | Active | March 10, 2023 | max(March 10, 2024 or release of 2.0 + 6 months) | +| [2.0](https://github.com/containerd/containerd/milestone/35) | Next | TBD | TBD | -Note that branches and release from before 1.0 may not follow these rules. 
-| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version | -|------------------------|--------------------|--------------------|--------------| -| v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 | -| v1.0.0-beta.x | | 1.9 | v1alpha1 | -| End-Of-Life | v1.1 (End-Of-Life) | 1.10+ | v1alpha2 | -| | v1.2 (End-Of-Life) | 1.10+ | v1alpha2 | -| | v1.3 (End-Of-Life) | 1.12+ | v1alpha2 | -| | v1.4 | 1.19+ | v1alpha2 | -| | v1.5 | 1.20+ | v1alpha2 | -| | v1.6 | 1.23+ | v1, v1alpha2 | +### Kubernetes Support -**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration and Kubernetes only supports n-3 minor release versions. +The Kubernetes version matrix represents the versions of containerd which are +recommended for a Kubernetes release. Any actively supported version of +containerd may receive patches to fix bugs encountered in any version of +Kubernetes, however, our recommendation is based on which versions have been +the most thoroughly tested. See the [Kubernetes test grid](https://testgrid.k8s.io/sig-node-containerd) +for the list of actively tested versions. Kubernetes only supports n-3 minor +release versions and containerd will ensure there is always a supported version +of containerd for every supported version of Kubernetes. -These tables should be updated as part of the release preparation process. +| Kubernetes Version | containerd Version | CRI Version | +|--------------------|--------------------|-----------------| +| 1.24 | 1.7.0+, 1.6.4+ | v1, v1alpha2 | +| 1.25 | 1.7.0+, 1.6.4+ | v1, v1alpha2 ** | +| 1.26 | 1.7.0+, 1.6.15+ | v1 | + +** Note: containerd v1.6.*, and v1.7.* support CRI v1 and v1alpha2 through EOL as those releases continue to support older versions of k8s, cloud providers, and other clients using CRI v1alpha2. CRI v1alpha2 is deprecated in v1.7 and will be removed in containerd v2.0. + +Deprecated containerd and kubernetes versions + +| Containerd Version | Kubernetes Version | CRI Version | +|--------------------------|--------------------|----------------------| +| v1.0 (w/ cri-containerd) | 1.7, 1.8, 1.9 | v1alpha1 | +| v1.1 | 1.10+ | v1alpha2 | +| v1.2 | 1.10+ | v1alpha2 | +| v1.3 | 1.12+ | v1alpha2 | +| v1.4 | 1.19+ | v1alpha2 | +| v1.5 | 1.20+ | v1 (1.23+), v1alpha2 | ### Backporting Backports in containerd are community driven. As maintainers, we'll try to ensure that sensible bugfixes make it into _active_ release, but our main focus will be features for the next _minor_ or _major_ release. For the most part, -this process is straightforward and we are here to help make it as smooth as +this process is straightforward, and we are here to help make it as smooth as possible. If there are important fixes that need to be backported, please let us know in @@ -137,7 +164,10 @@ one of three ways: 2. Open a PR with cherry-picked change from main. 3. Open a PR with a ported fix. -__If you are reporting a security issue, please reach out discreetly at security@containerd.io__. +__If you are reporting a security issue:__ + +Please follow the instructions at [containerd/project](https://github.com/containerd/project/blob/main/SECURITY.md#reporting-a-vulnerability) + Remember that backported PRs must follow the versioning guidelines from this document. Any release that is "active" can accept backports. Opening a backport PR is @@ -145,7 +175,7 @@ fairly straightforward. 
The steps differ depending on whether you are pulling a fix from main or need to draft a new commit specific to a particular branch. -To cherry pick a straightforward commit from main, simply use the cherry pick +To cherry-pick a straightforward commit from main, simply use the cherry-pick process: 1. Pick the branch to which you want backported, usually in the format @@ -162,6 +192,9 @@ process: ```console $ git cherry-pick -xsS ``` + (Optional) If other commits exist in the main branch which are related + to the cherry-picked commit; eg: fixes to the main PR. It is recommended + to cherry-pick those commits also into this same `my-backport-branch`. 4. Push the branch and open up a PR against the _release branch_: ``` @@ -241,7 +274,7 @@ Plugins implemented in tree are supported by the containerd community unless exp Out of tree plugins are not supported by the containerd maintainers. Currently, the Windows runtime and snapshot plugins are not stable and not supported. -Please refer to the github milestones for Windows support in a future release. +Please refer to the GitHub milestones for Windows support in a future release. #### Error Codes @@ -253,7 +286,7 @@ new version of the service. If you find that an error code that is required by your application is not well-documented in the protobuf service description or tested explicitly, -please file and issue and we will clarify. +please file an issue and we will clarify. #### Opaque Fields @@ -281,7 +314,7 @@ be a matter of fixing compilation errors and moving from there. The CRI (Container Runtime Interface) GRPC API is used by a Kubernetes kubelet to communicate with a container runtime. This interface is used to manage -container lifecycles and container images. Currently this API is under +container lifecycles and container images. Currently, this API is under development and unstable across Kubernetes releases. Each Kubernetes release only supports a single version of CRI and the CRI plugin only implements a single version of CRI. @@ -294,7 +327,7 @@ version of Kubernetes which supports that version of CRI. The `ctr` tool provides the ability to introspect and understand the containerd API. It is not considered a primary offering of the project and is unsupported in -that sense. While we understand it's value as a debug tool, it may be completely +that sense. While we understand its value as a debug tool, it may be completely refactored or have breaking changes in _minor_ releases. Targeting `ctr` for feature additions reflects a misunderstanding of the containerd @@ -336,10 +369,84 @@ against total impact. 
The deprecated features are shown in the following table: -| Component | Deprecation release | Target release for removal | Recommendation | -|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------| -| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | -| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter | -| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles | +| Component | Deprecation release | Target release for removal | Recommendation | +|----------------------------------------------------------------------------------|---------------------|----------------------------|------------------------------------------| +| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | +| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter | +| Container label `containerd.io/restart.logpath` | containerd v1.5 | containerd v2.0 | Use `containerd.io/restart.loguri` label | +| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles | +| Pulling Schema 1 images (`application/vnd.docker.distribution.manifest.v1+json`) | containerd v1.7 | containerd v2.0 | Use Schema 2 or OCI images | +| CRI `v1alpha2` | containerd v1.7 | containerd v2.0 | Use CRI `v1` | + +### Deprecated config properties +The deprecated properties in [`config.toml`](./docs/cri/config.md) are shown in the following table: + +| Property Group | Property | Deprecation release | Target release for removal | Recommendation | +|----------------------------------------------------------------------|------------------------------|---------------------|----------------------------|-------------------------------------------------| +|`[plugins."io.containerd.grpc.v1.cri"]` | `systemd_cgroup` | containerd v1.3 | containerd v2.0 | Use `SystemdCgroup` in runc options (see below) | +|`[plugins."io.containerd.grpc.v1.cri".containerd]` | `untrusted_workload_runtime` | containerd v1.2 | containerd v2.0 | Create `untrusted` runtime in `runtimes` | +|`[plugins."io.containerd.grpc.v1.cri".containerd]` | `default_runtime` | containerd v1.3 | containerd v2.0 | Use `default_runtime_name` | +|`[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.*]` | `runtime_engine` | containerd v1.3 | containerd v2.0 | Use runtime v2 | +|`[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.*]` | `runtime_root` | containerd v1.3 | containerd v2.0 | Use `options.Root` | +|`[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.*.options]` | `CriuPath` | containerd v1.7 | containerd v2.0 | Set `$PATH` to the `criu` binary | +|`[plugins."io.containerd.grpc.v1.cri".registry]` | `auths` | containerd v1.3 | containerd v2.0 | Use 
[`ImagePullSecrets`](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). See also [#8228](https://github.com/containerd/containerd/issues/8228). | +|`[plugins."io.containerd.grpc.v1.cri".registry]` | `configs` | containerd v1.5 | containerd v2.0 | Use [`config_path`](./docs/hosts.md) | +|`[plugins."io.containerd.grpc.v1.cri".registry]` | `mirrors` | containerd v1.5 | containerd v2.0 | Use [`config_path`](./docs/hosts.md) | + +> **Note** +> +> CNI Config Template (`plugins."io.containerd.grpc.v1.cri".cni.conf_template`) was once deprecated in v1.7.0, +> but its deprecation was cancelled in v1.7.3. + +
<details><summary>Example: runc option SystemdCgroup</summary><p>
+
+```toml
+version = 2
+
+# OLD
+# [plugins."io.containerd.grpc.v1.cri"]
+#   systemd_cgroup = true
+
+# NEW
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+  SystemdCgroup = true
+```
+
+</p></details>
+
+<details><summary>Example: runc option Root</summary><p>
+
+```toml
+version = 2
+
+# OLD
+# [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+#   runtime_root = "/path/to/runc/root"
+
+# NEW
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+  Root = "/path/to/runc/root"
+```
+
+</p></details>
+ +## Experimental features + +Experimental features are new features added to containerd which do not have the +same stability guarantees as the rest of containerd. An effort is made to avoid +breaking interfaces between versions, but changes to experimental features before +being fully supported is possible. Users can still expect experimental features +to be high quality and are encouraged to use new features to help them stabilize +more quickly. + +| Component | Initial Release | Target Supported Release | +|----------------------------------------------------------------------------------------|-----------------|--------------------------| +| [Sandbox Service](https://github.com/containerd/containerd/pull/6703) | containerd v1.7 | containerd v2.0 | +| [Sandbox CRI Server](https://github.com/containerd/containerd/pull/7228) | containerd v1.7 | containerd v2.0 | +| [Transfer Service](https://github.com/containerd/containerd/pull/7320) | containerd v1.7 | containerd v2.0 | +| [NRI in CRI Support](https://github.com/containerd/containerd/pull/6019) | containerd v1.7 | containerd v2.0 | +| [gRPC Shim](https://github.com/containerd/containerd/pull/8052) | containerd v1.7 | containerd v2.0 | +| [CRI Runtime Specific Snapshotter](https://github.com/containerd/containerd/pull/6899) | containerd v1.7 | containerd v2.0 | +| [CRI Support for User Namespaces](https://github.com/containerd/containerd/pull/7679) | containerd v1.7 | containerd v2.0 | diff --git a/vendor/github.com/containerd/containerd/ROADMAP.md b/vendor/github.com/containerd/containerd/ROADMAP.md index aacd107392..cf542e1e45 100644 --- a/vendor/github.com/containerd/containerd/ROADMAP.md +++ b/vendor/github.com/containerd/containerd/ROADMAP.md @@ -1,7 +1,7 @@ # containerd roadmap containerd uses the issues and milestones to define its roadmap. -`ROADMAP.md` files are common in open source projects but we find they quickly become out of date. +`ROADMAP.md` files are common in open source projects, but we find they quickly become out of date. We opt for an issues and milestone approach that our maintainers and community can keep up-to-date as work is added and completed. ## Issues diff --git a/vendor/github.com/containerd/containerd/SCOPE.md b/vendor/github.com/containerd/containerd/SCOPE.md index aec9da9158..3eb1c55fc2 100644 --- a/vendor/github.com/containerd/containerd/SCOPE.md +++ b/vendor/github.com/containerd/containerd/SCOPE.md @@ -31,7 +31,7 @@ Additional implementations will not be accepted into the core repository and sho ## Scope The following table specifies the various components of containerd and general features of container runtimes. -The table specifies whether or not the feature/component is in or out of scope. +The table specifies whether the feature/component is in or out of scope. 
| Name | Description | In/Out | Reason | |------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/vendor/github.com/containerd/containerd/Vagrantfile b/vendor/github.com/containerd/containerd/Vagrantfile index 71946f052b..dbe3ac7ecc 100644 --- a/vendor/github.com/containerd/containerd/Vagrantfile +++ b/vendor/github.com/containerd/containerd/Vagrantfile @@ -17,21 +17,29 @@ # Vagrantfile for Fedora and EL Vagrant.configure("2") do |config| - config.vm.box = ENV["BOX"] || "fedora/37-cloud-base" - config.vm.box_version = ENV["BOX_VERSION"] + config.vm.box = ENV["BOX"] ? ENV["BOX"].split("@")[0] : "fedora/37-cloud-base" + # BOX_VERSION is deprecated. Use "BOX=@". + config.vm.box_version = ENV["BOX_VERSION"] || (ENV["BOX"].split("@")[1] if ENV["BOX"]) + memory = 4096 cpus = 2 - config.vm.provider :virtualbox do |v| + disk_size = 60 + config.vm.provider :virtualbox do |v, o| v.memory = memory v.cpus = cpus + # Needs env var VAGRANT_EXPERIMENTAL="disks" + o.vm.disk :disk, size: "#{disk_size}GB", primary: true end config.vm.provider :libvirt do |v| v.memory = memory v.cpus = cpus + v.machine_virtual_size = disk_size end config.vm.synced_folder ".", "/vagrant", type: "rsync" + config.vm.provision 'shell', path: 'script/resize-vagrant-root.sh' + # Disabled by default. To run: # vagrant up --provision-with=upgrade-packages # To upgrade only specific packages: @@ -200,6 +208,19 @@ EOF SHELL end + config.vm.provision "install-failpoint-binaries", type: "shell", run: "once" do |sh| + sh.upload_path = "/tmp/vagrant-install-failpoint-binaries" + sh.inline = <<~SHELL + #!/usr/bin/env bash + source /etc/environment + source /etc/profile.d/sh.local + set -eux -o pipefail + ${GOPATH}/src/github.com/containerd/containerd/script/setup/install-failpoint-binaries + chcon -v -t container_runtime_exec_t $(type -ap containerd-shim-runc-fp-v1) + containerd-shim-runc-fp-v1 -v + SHELL + end + # SELinux is Enforcing by default. # To set SELinux as Disabled on a VM that has already been provisioned: # SELINUX=Disabled vagrant up --provision-with=selinux @@ -225,6 +246,7 @@ EOF 'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc", 'GOTEST': ENV['GOTEST'] || "go test", 'GOTESTSUM_JUNITFILE': ENV['GOTESTSUM_JUNITFILE'], + 'GOTESTSUM_JSONFILE': ENV['GOTESTSUM_JSONFILE'], } sh.inline = <<~SHELL #!/usr/bin/env bash @@ -238,6 +260,36 @@ EOF SHELL end + # SELinux is Enforcing by default (via provisioning) in this VM. 
To re-run with SELinux disabled: + # SELINUX=Disabled vagrant up --provision-with=selinux,test-cri-integration + # + config.vm.provision "test-cri-integration", type: "shell", run: "never" do |sh| + sh.upload_path = "/tmp/test-cri-integration" + sh.env = { + 'GOTEST': ENV['GOTEST'] || "go test", + 'GOTESTSUM_JUNITFILE': ENV['GOTESTSUM_JUNITFILE'], + 'GOTESTSUM_JSONFILE': ENV['GOTESTSUM_JSONFILE'], + 'GITHUB_WORKSPACE': '', + 'ENABLE_CRI_SANDBOXES': ENV['ENABLE_CRI_SANDBOXES'], + } + sh.inline = <<~SHELL + #!/usr/bin/env bash + source /etc/environment + source /etc/profile.d/sh.local + set -eux -o pipefail + cleanup() { + rm -rf /var/lib/containerd* /run/containerd* /tmp/containerd* /tmp/test* /tmp/failpoint* /tmp/nri* + } + cleanup + cd ${GOPATH}/src/github.com/containerd/containerd + # cri-integration.sh executes containerd from ./bin, not from $PATH . + make BUILDTAGS="seccomp selinux no_aufs no_btrfs no_devmapper no_zfs" binaries bin/cri-integration.test + chcon -v -t container_runtime_exec_t ./bin/{containerd,containerd-shim*} + CONTAINERD_RUNTIME=io.containerd.runc.v2 ./script/test/cri-integration.sh + cleanup + SHELL + end + # SELinux is Enforcing by default (via provisioning) in this VM. To re-run with SELinux disabled: # SELINUX=Disabled vagrant up --provision-with=selinux,test-cri # diff --git a/vendor/github.com/containerd/containerd/api/events/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go index fe002e0736..d7d40258c3 100644 --- a/vendor/github.com/containerd/containerd/api/events/container.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go @@ -1,1413 +1,431 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/container.proto package events import ( - fmt "fmt" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ContainerCreate struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` - Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` +} + +func (x *ContainerCreate) Reset() { + *x = ContainerCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ContainerCreate) Reset() { *m = ContainerCreate{} } func (*ContainerCreate) ProtoMessage() {} + +func (x *ContainerCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerCreate.ProtoReflect.Descriptor instead. 
func (*ContainerCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{0} + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{0} } -func (m *ContainerCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *ContainerCreate) GetID() string { + if x != nil { + return x.ID } -} -func (m *ContainerCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerCreate.Merge(m, src) -} -func (m *ContainerCreate) XXX_Size() int { - return m.Size() -} -func (m *ContainerCreate) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerCreate.DiscardUnknown(m) + return "" } -var xxx_messageInfo_ContainerCreate proto.InternalMessageInfo - -type ContainerCreate_Runtime struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Options *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ContainerCreate_Runtime) Reset() { *m = ContainerCreate_Runtime{} } -func (*ContainerCreate_Runtime) ProtoMessage() {} -func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{0, 0} -} -func (m *ContainerCreate_Runtime) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerCreate_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerCreate_Runtime.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ContainerCreate) GetImage() string { + if x != nil { + return x.Image } -} -func (m *ContainerCreate_Runtime) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerCreate_Runtime.Merge(m, src) -} -func (m *ContainerCreate_Runtime) XXX_Size() int { - return m.Size() -} -func (m *ContainerCreate_Runtime) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerCreate_Runtime.DiscardUnknown(m) + return "" } -var xxx_messageInfo_ContainerCreate_Runtime proto.InternalMessageInfo +func (x *ContainerCreate) GetRuntime() *ContainerCreate_Runtime { + if x != nil { + return x.Runtime + } + return nil +} type ContainerUpdate struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" 
json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` } -func (m *ContainerUpdate) Reset() { *m = ContainerUpdate{} } -func (*ContainerUpdate) ProtoMessage() {} -func (*ContainerUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{1} -} -func (m *ContainerUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ContainerUpdate) Reset() { + *x = ContainerUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ContainerUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerUpdate.Merge(m, src) -} -func (m *ContainerUpdate) XXX_Size() int { - return m.Size() -} -func (m *ContainerUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerUpdate.DiscardUnknown(m) + +func (x *ContainerUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ContainerUpdate proto.InternalMessageInfo +func (*ContainerUpdate) ProtoMessage() {} + +func (x *ContainerUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerUpdate.ProtoReflect.Descriptor instead. 
+func (*ContainerUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{1} +} + +func (x *ContainerUpdate) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ContainerUpdate) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +func (x *ContainerUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ContainerUpdate) GetSnapshotKey() string { + if x != nil { + return x.SnapshotKey + } + return "" +} type ContainerDelete struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ContainerDelete) Reset() { + *x = ContainerDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ContainerDelete) Reset() { *m = ContainerDelete{} } func (*ContainerDelete) ProtoMessage() {} + +func (x *ContainerDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerDelete.ProtoReflect.Descriptor instead. 
func (*ContainerDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{2} + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{2} } -func (m *ContainerDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *ContainerDelete) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (m *ContainerDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +type ContainerCreate_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *ContainerCreate_Runtime) Reset() { + *x = ContainerCreate_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerCreate_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerCreate_Runtime) ProtoMessage() {} + +func (x *ContainerCreate_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *ContainerDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerDelete.Merge(m, src) -} -func (m *ContainerDelete) XXX_Size() int { - return m.Size() -} -func (m *ContainerDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerDelete.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_ContainerDelete proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate") - proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime") - proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.ContainerUpdate.LabelsEntry") - proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete") +// Deprecated: Use ContainerCreate_Runtime.ProtoReflect.Descriptor instead. 
+func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{0, 0} } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptor_0d1f05b8626f83ea) +func (x *ContainerCreate_Runtime) GetName() string { + if x != nil { + return x.Name + } + return "" } -var fileDescriptor_0d1f05b8626f83ea = []byte{ - // 413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30, - 0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21, - 0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb, - 0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71, - 0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f, - 0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97, - 0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96, - 0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c, - 0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56, - 0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23, - 0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7, - 0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06, - 0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b, - 0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86, - 0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f, - 0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b, - 0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58, - 0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9, - 0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a, - 0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 0xaa, 0x89, - 0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8, - 0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf, - 0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde, - 0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b, - 0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e, - 0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 - case "image": - return string(m.Image), len(m.Image) > 0 - case "runtime": - // NOTE(stevvooe): This is probably not correct in many cases. 
- // We assume that the target message also implements the Field - // method, which isn't likely true in a lot of cases. - // - // If you have a broken build and have found this comment, - // you may be closer to a solution. - if m.Runtime == nil { - return "", false - } - - return m.Runtime.Field(fieldpath[1:]) - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "options": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Options) - if err != nil { - return "", false - } - - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false - } - return adaptor.Field(fieldpath[1:]) - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 - case "image": - return string(m.Image), len(m.Image) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - case "snapshot_key": - return string(m.SnapshotKey), len(m.SnapshotKey) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *ContainerDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 - } - return "", false -} -func (m *ContainerCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Runtime != nil { - { - size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainer(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerCreate_Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerCreate_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainer(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SnapshotKey) > 0 { - i -= len(m.SnapshotKey) - copy(dAtA[i:], m.SnapshotKey) - i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey))) - i-- - dAtA[i] = 0x22 - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContainer(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainer(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainer(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Image) > 0 { - i -= 
len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ContainerDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContainer(dAtA []byte, offset int, v uint64) int { - offset -= sovContainer(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContainerCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.Runtime != nil { - l = m.Runtime.Size() - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ContainerCreate_Runtime) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ContainerUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v))) - n += mapEntrySize + 1 + sovContainer(uint64(mapEntrySize)) - } - } - l = len(m.SnapshotKey) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ContainerDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContainer(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContainer(x uint64) (n int) { - return sovContainer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContainerCreate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerCreate{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "ContainerCreate_Runtime", "ContainerCreate_Runtime", 1) + `,`, - `XXX_unrecognized:` + 
fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerCreate_Runtime) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerCreate_Runtime{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ContainerUpdate{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Labels:` + mapStringForLabels + `,`, - `SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerDelete{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContainer(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ContainerCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Runtime == nil { - m.Runtime = &ContainerCreate_Runtime{} - } - if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ContainerCreate_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options } return nil } -func (m *ContainerCreate_Runtime) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Runtime: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types.Any{} - } - if err := 
m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContainer - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContainer - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_events_container_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipContainer(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContainer - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContainer - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContainer - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_events_container_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x40, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x01, + 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x4d, 0x0a, + 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xdd, 0x01, 0x0a, + 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4b, 0x65, + 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x0f, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, + 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContainer = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContainer = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_container_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_container_proto_rawDescData = file_github_com_containerd_containerd_api_events_container_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_container_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_container_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_container_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_container_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_container_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_container_proto_goTypes = []interface{}{ + (*ContainerCreate)(nil), // 0: 
containerd.events.ContainerCreate + (*ContainerUpdate)(nil), // 1: containerd.events.ContainerUpdate + (*ContainerDelete)(nil), // 2: containerd.events.ContainerDelete + (*ContainerCreate_Runtime)(nil), // 3: containerd.events.ContainerCreate.Runtime + nil, // 4: containerd.events.ContainerUpdate.LabelsEntry + (*anypb.Any)(nil), // 5: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_events_container_proto_depIdxs = []int32{ + 3, // 0: containerd.events.ContainerCreate.runtime:type_name -> containerd.events.ContainerCreate.Runtime + 4, // 1: containerd.events.ContainerUpdate.labels:type_name -> containerd.events.ContainerUpdate.LabelsEntry + 5, // 2: containerd.events.ContainerCreate.Runtime.options:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_container_proto_init() } +func file_github_com_containerd_containerd_api_events_container_proto_init() { + if File_github_com_containerd_containerd_api_events_container_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerCreate_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_container_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_container_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_container_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_container_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_container_proto = out.File + file_github_com_containerd_containerd_api_events_container_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_container_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_container_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/container.proto 
b/vendor/github.com/containerd/containerd/api/events/container.proto index dfeca308ea..29fd18f88d 100644 --- a/vendor/github.com/containerd/containerd/api/events/container.proto +++ b/vendor/github.com/containerd/containerd/api/events/container.proto @@ -19,8 +19,7 @@ syntax = "proto3"; package containerd.events; import "google/protobuf/any.proto"; -import weak "gogoproto/gogo.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; diff --git a/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go new file mode 100644 index 0000000000..c23b07762e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go @@ -0,0 +1,95 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/container.proto +package events + +import ( + v2 "github.com/containerd/typeurl/v2" + strings "strings" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "runtime": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.Runtime == nil { + return "", false + } + return m.Runtime.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "options": + decoded, err := v2.UnmarshalAny(m.Options) + if err != nil { + return "", false + } + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + case "snapshot_key": + return string(m.SnapshotKey), len(m.SnapshotKey) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. 
+// If the value is not defined, the second value will be false. +func (m *ContainerDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go index 0a7ec9325d..9e3f843e2b 100644 --- a/vendor/github.com/containerd/containerd/api/events/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -1,359 +1,168 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/content.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ContentDelete struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` +} + +func (x *ContentDelete) Reset() { + *x = ContentDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ContentDelete) Reset() { *m = ContentDelete{} } func (*ContentDelete) ProtoMessage() {} + +func (x *ContentDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentDelete.ProtoReflect.Descriptor instead. func (*ContentDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_dfb34b8b808e2ecd, []int{0} -} -func (m *ContentDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContentDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContentDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ContentDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContentDelete.Merge(m, src) -} -func (m *ContentDelete) XXX_Size() int { - return m.Size() -} -func (m *ContentDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ContentDelete.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_ContentDelete proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete") +func (x *ContentDelete) GetDigest() string { + if x != nil { + return x.Digest + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptor_dfb34b8b808e2ecd) -} +var File_github_com_containerd_containerd_api_events_content_proto protoreflect.FileDescriptor -var fileDescriptor_dfb34b8b808e2ecd = []byte{ - // 228 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53, - 0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b, - 0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53, - 0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 
0xec, 0x76, - 0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d, - 0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8, - 0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, - 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, - 0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11, - 0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee, - 0x61, 0x01, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContentDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "digest": - return string(m.Digest), len(m.Digest) > 0 - } - return "", false -} -func (m *ContentDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContentDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContent(dAtA []byte, offset int, v uint64) int { - offset -= sovContent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContentDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContent(x uint64) (n int) { - return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContentDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContentDelete{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ContentDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContentDelete: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipContent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContent - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_events_content_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x40, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 
0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, + 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_content_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_content_proto_rawDescData = file_github_com_containerd_containerd_api_events_content_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_content_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_content_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_content_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_content_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_events_content_proto_goTypes = []interface{}{ + (*ContentDelete)(nil), // 0: containerd.events.ContentDelete +} +var file_github_com_containerd_containerd_api_events_content_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_content_proto_init() } +func file_github_com_containerd_containerd_api_events_content_proto_init() { + if File_github_com_containerd_containerd_api_events_content_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_content_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_content_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_content_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_content_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_content_proto = out.File + 
file_github_com_containerd_containerd_api_events_content_proto_rawDesc = nil
+	file_github_com_containerd_containerd_api_events_content_proto_goTypes = nil
+	file_github_com_containerd_containerd_api_events_content_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto
index b8f84bc89b..97d4241391 100644
--- a/vendor/github.com/containerd/containerd/api/events/content.proto
+++ b/vendor/github.com/containerd/containerd/api/events/content.proto
@@ -18,12 +18,11 @@ syntax = "proto3";
 
 package containerd.events;
 
-import weak "gogoproto/gogo.proto";
-import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
 option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
 
 message ContentDelete {
-	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	string digest = 1;
 }
diff --git a/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go
new file mode 100644
index 0000000000..9485b664c1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go
@@ -0,0 +1,16 @@
+// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/content.proto
+package events
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "digest":
+		return string(m.Digest), len(m.Digest) > 0
+	}
+	return "", false
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go
index 7470269454..111af29e6c 100644
--- a/vendor/github.com/containerd/containerd/api/events/image.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go
@@ -1,1109 +1,330 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/image.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ImageCreate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ImageCreate) Reset() { *m = ImageCreate{} } -func (*ImageCreate) ProtoMessage() {} -func (*ImageCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{0} -} -func (m *ImageCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ImageCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ImageCreate) Reset() { + *x = ImageCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ImageCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageCreate.Merge(m, src) -} -func (m *ImageCreate) XXX_Size() int { - return m.Size() -} -func (m *ImageCreate) XXX_DiscardUnknown() { - xxx_messageInfo_ImageCreate.DiscardUnknown(m) + +func (x *ImageCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ImageCreate proto.InternalMessageInfo +func (*ImageCreate) ProtoMessage() {} + +func (x *ImageCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageCreate.ProtoReflect.Descriptor instead. +func (*ImageCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{0} +} + +func (x *ImageCreate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageCreate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type ImageUpdate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ImageUpdate) Reset() { *m = ImageUpdate{} } -func (*ImageUpdate) ProtoMessage() {} -func (*ImageUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{1} -} -func (m *ImageUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ImageUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ImageUpdate) Reset() { + *x = ImageUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ImageUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageUpdate.Merge(m, src) -} -func (m *ImageUpdate) XXX_Size() int { - return m.Size() -} -func (m *ImageUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ImageUpdate.DiscardUnknown(m) + +func (x *ImageUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ImageUpdate proto.InternalMessageInfo +func (*ImageUpdate) ProtoMessage() {} + +func (x *ImageUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageUpdate.ProtoReflect.Descriptor instead. 
+func (*ImageUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{1} +} + +func (x *ImageUpdate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type ImageDelete struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ImageDelete) Reset() { + *x = ImageDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ImageDelete) Reset() { *m = ImageDelete{} } func (*ImageDelete) ProtoMessage() {} + +func (x *ImageDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageDelete.ProtoReflect.Descriptor instead. func (*ImageDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{2} -} -func (m *ImageDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ImageDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ImageDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageDelete.Merge(m, src) -} -func (m *ImageDelete) XXX_Size() int { - return m.Size() -} -func (m *ImageDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ImageDelete.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_ImageDelete proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageCreate.LabelsEntry") - proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageUpdate.LabelsEntry") - proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete") +func (x *ImageDelete) GetName() string { + if x != nil { + return x.Name + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptor_7085610f7b33e042) -} +var File_github_com_containerd_containerd_api_events_image_proto protoreflect.FileDescriptor -var fileDescriptor_7085610f7b33e042 = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 
0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6, - 0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7, - 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, - 0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, - 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, - 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, - 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, - 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, - 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, - 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, - 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, - 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, - 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, - 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11, - 0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17, - 0x77, 0x02, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ImageCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ImageUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *ImageDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false -} -func (m *ImageCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ImageCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImage(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImage(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImage(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ImageUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ImageUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImage(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImage(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImage(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ImageDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ImageDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintImage(dAtA []byte, offset int, v uint64) int { - offset -= sovImage(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ImageCreate) Size() (n int) { - if m == nil { - return 0 - } - var l 
int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) - n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ImageUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) - n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ImageDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovImage(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozImage(x uint64) (n int) { - return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ImageCreate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ImageCreate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ImageUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ImageUpdate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ImageDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageDelete{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringImage(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ImageCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire 
>> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > 
postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = 
postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipImage(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthImage - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupImage - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthImage - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_events_image_proto_rawDesc = []byte{ + 0x0a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac, 0x01, 0x0a, 0x0b, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 
0x01, 0x0a, 0x0b, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x0b, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x38, 0x5a, 0x32, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowImage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupImage = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_image_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_image_proto_rawDescData = file_github_com_containerd_containerd_api_events_image_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_image_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_image_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_image_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_image_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_image_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_image_proto_goTypes = []interface{}{ + (*ImageCreate)(nil), // 0: containerd.services.images.v1.ImageCreate + (*ImageUpdate)(nil), // 1: containerd.services.images.v1.ImageUpdate + (*ImageDelete)(nil), // 2: containerd.services.images.v1.ImageDelete + nil, // 3: containerd.services.images.v1.ImageCreate.LabelsEntry + nil, // 4: containerd.services.images.v1.ImageUpdate.LabelsEntry +} +var file_github_com_containerd_containerd_api_events_image_proto_depIdxs = []int32{ + 3, // 0: containerd.services.images.v1.ImageCreate.labels:type_name -> containerd.services.images.v1.ImageCreate.LabelsEntry + 4, // 1: containerd.services.images.v1.ImageUpdate.labels:type_name -> containerd.services.images.v1.ImageUpdate.LabelsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is 
the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_github_com_containerd_containerd_api_events_image_proto_init() }
+func file_github_com_containerd_containerd_api_events_image_proto_init() {
+	if File_github_com_containerd_containerd_api_events_image_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ImageCreate); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ImageUpdate); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ImageDelete); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_containerd_containerd_api_events_image_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_containerd_containerd_api_events_image_proto_goTypes,
+		DependencyIndexes: file_github_com_containerd_containerd_api_events_image_proto_depIdxs,
+		MessageInfos:      file_github_com_containerd_containerd_api_events_image_proto_msgTypes,
+	}.Build()
+	File_github_com_containerd_containerd_api_events_image_proto = out.File
+	file_github_com_containerd_containerd_api_events_image_proto_rawDesc = nil
+	file_github_com_containerd_containerd_api_events_image_proto_goTypes = nil
+	file_github_com_containerd_containerd_api_events_image_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/image.proto b/vendor/github.com/containerd/containerd/api/events/image.proto
index fe455b54ca..c09c7384f3 100644
--- a/vendor/github.com/containerd/containerd/api/events/image.proto
+++ b/vendor/github.com/containerd/containerd/api/events/image.proto
@@ -18,7 +18,7 @@ syntax = "proto3";
 
 package containerd.services.images.v1;
 
-import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
 option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
diff --git a/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go
new file mode 100644
index 0000000000..2a56fcf9d8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/image.proto
+package events
+
+import (
+	strings "strings"
+)
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageUpdate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	}
+	return "", false
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
index d406a987e9..801c1219e8 100644
--- a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
@@ -1,1109 +1,330 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.20.1
 // source: github.com/containerd/containerd/api/events/namespace.proto
 
 package events
 
 import (
-	fmt "fmt"
-	proto "github.com/gogo/protobuf/proto"
-	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-	io "io"
-	math "math"
-	math_bits "math/bits"
+	_ "github.com/containerd/containerd/protobuf/plugin"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
-	strings "strings"
+	sync "sync"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type NamespaceCreate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} } -func (*NamespaceCreate) ProtoMessage() {} -func (*NamespaceCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{0} -} -func (m *NamespaceCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *NamespaceCreate) Reset() { + *x = NamespaceCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *NamespaceCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceCreate.Merge(m, src) -} -func (m *NamespaceCreate) XXX_Size() int { - return m.Size() -} -func (m *NamespaceCreate) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceCreate.DiscardUnknown(m) + +func (x *NamespaceCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_NamespaceCreate proto.InternalMessageInfo +func (*NamespaceCreate) ProtoMessage() {} + +func (x *NamespaceCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceCreate.ProtoReflect.Descriptor instead. 
+func (*NamespaceCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{0} +} + +func (x *NamespaceCreate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamespaceCreate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type NamespaceUpdate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} } -func (*NamespaceUpdate) ProtoMessage() {} -func (*NamespaceUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{1} -} -func (m *NamespaceUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *NamespaceUpdate) Reset() { + *x = NamespaceUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *NamespaceUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceUpdate.Merge(m, src) -} -func (m *NamespaceUpdate) XXX_Size() int { - return m.Size() -} -func (m *NamespaceUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceUpdate.DiscardUnknown(m) + +func (x *NamespaceUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_NamespaceUpdate proto.InternalMessageInfo +func (*NamespaceUpdate) ProtoMessage() {} + +func (x *NamespaceUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceUpdate.ProtoReflect.Descriptor instead. 
+func (*NamespaceUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{1} +} + +func (x *NamespaceUpdate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamespaceUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type NamespaceDelete struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *NamespaceDelete) Reset() { + *x = NamespaceDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamespaceDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} } func (*NamespaceDelete) ProtoMessage() {} + +func (x *NamespaceDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamespaceDelete.ProtoReflect.Descriptor instead. func (*NamespaceDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{2} -} -func (m *NamespaceDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NamespaceDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceDelete.Merge(m, src) -} -func (m *NamespaceDelete) XXX_Size() int { - return m.Size() -} -func (m *NamespaceDelete) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceDelete.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_NamespaceDelete proto.InternalMessageInfo - -func init() { - proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceCreate.LabelsEntry") - proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceUpdate.LabelsEntry") - proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete") +func (x *NamespaceDelete) GetName() string { + if x != nil { + return x.Name + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptor_6cd45d1d5adffe29) -} +var File_github_com_containerd_containerd_api_events_namespace_proto protoreflect.FileDescriptor -var fileDescriptor_6cd45d1d5adffe29 = []byte{ - // 296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25, - 0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, - 0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88, - 0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67, - 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1, - 0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05, - 0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27, - 0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d, - 0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b, - 0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31, - 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15, - 0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa, - 0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86, - 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, - 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21, - 0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false -} -func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { - offset -= sovNamespace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base 
-} -func (m *NamespaceCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NamespaceUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NamespaceDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovNamespace(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNamespace(x uint64) (n int) { - return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *NamespaceCreate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&NamespaceCreate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&NamespaceUpdate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamespaceDelete{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringNamespace(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *NamespaceCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNamespace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNamespace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNamespace - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNamespace - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa8, 0x01, + 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, + 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData = file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_namespace_proto_goTypes = []interface{}{ + (*NamespaceCreate)(nil), // 0: containerd.events.NamespaceCreate + (*NamespaceUpdate)(nil), // 1: containerd.events.NamespaceUpdate + (*NamespaceDelete)(nil), // 2: containerd.events.NamespaceDelete + nil, // 3: containerd.events.NamespaceCreate.LabelsEntry + nil, // 4: containerd.events.NamespaceUpdate.LabelsEntry +} +var file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs = []int32{ + 3, // 0: containerd.events.NamespaceCreate.labels:type_name -> containerd.events.NamespaceCreate.LabelsEntry + 4, // 1: containerd.events.NamespaceUpdate.labels:type_name -> containerd.events.NamespaceUpdate.LabelsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // 
[2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_namespace_proto_init() } +func file_github_com_containerd_containerd_api_events_namespace_proto_init() { + if File_github_com_containerd_containerd_api_events_namespace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_namespace_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_namespace_proto = out.File + file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_namespace_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.proto b/vendor/github.com/containerd/containerd/api/events/namespace.proto index 53a8ee6306..9bae531d7f 100644 --- a/vendor/github.com/containerd/containerd/api/events/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/events/namespace.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package containerd.events; -import weak "gogoproto/gogo.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; diff --git a/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go new file mode 100644 index 0000000000..93d20a6767 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/events/namespace.proto +package events + +import ( + strings "strings" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/events/sandbox.pb.go new file mode 100644 index 0000000000..08f5e70e48 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/sandbox.pb.go @@ -0,0 +1,316 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/events/sandbox.proto + +package events + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SandboxCreate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *SandboxCreate) Reset() { + *x = SandboxCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxCreate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxCreate) ProtoMessage() {} + +func (x *SandboxCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxCreate.ProtoReflect.Descriptor instead. +func (*SandboxCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *SandboxCreate) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type SandboxStart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *SandboxStart) Reset() { + *x = SandboxStart{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxStart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStart) ProtoMessage() {} + +func (x *SandboxStart) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStart.ProtoReflect.Descriptor instead. 
+func (*SandboxStart) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescGZIP(), []int{1} +} + +func (x *SandboxStart) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type SandboxExit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *SandboxExit) Reset() { + *x = SandboxExit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxExit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxExit) ProtoMessage() {} + +func (x *SandboxExit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxExit.ProtoReflect.Descriptor instead. +func (*SandboxExit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *SandboxExit) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *SandboxExit) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *SandboxExit) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +var File_github_com_containerd_containerd_api_events_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x2e, 0x0a, 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, + 0x2d, 0x0a, 0x0c, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x86, + 0x01, 0x0a, 0x0b, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 
0x78, 0x45, 0x78, 0x69, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, + 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_events_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_github_com_containerd_containerd_api_events_sandbox_proto_goTypes = []interface{}{ + (*SandboxCreate)(nil), // 0: containerd.events.SandboxCreate + (*SandboxStart)(nil), // 1: containerd.events.SandboxStart + (*SandboxExit)(nil), // 2: containerd.events.SandboxExit + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_events_sandbox_proto_depIdxs = []int32{ + 3, // 0: containerd.events.SandboxExit.exited_at:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_events_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_events_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxStart); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxExit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_sandbox_proto = out.File + file_github_com_containerd_containerd_api_events_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/sandbox.proto b/vendor/github.com/containerd/containerd/api/events/sandbox.proto new file mode 100644 index 0000000000..f1c5195e5a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/sandbox.proto @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.events; + +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; + +message SandboxCreate { + string sandbox_id = 1; +} + +message SandboxStart { + string sandbox_id = 1; +} + +message SandboxExit { + string sandbox_id = 1; + uint32 exit_status = 2; + google.protobuf.Timestamp exited_at = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/events/sandbox_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/sandbox_fieldpath.pb.go new file mode 100644 index 0000000000..5afb99457a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/sandbox_fieldpath.pb.go @@ -0,0 +1,44 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/sandbox.proto +package events + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SandboxCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "sandbox_id": + return string(m.SandboxID), len(m.SandboxID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. 
+// If the value is not defined, the second value will be false. +func (m *SandboxStart) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "sandbox_id": + return string(m.SandboxID), len(m.SandboxID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SandboxExit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: exit_status + // unhandled: exited_at + case "sandbox_id": + return string(m.SandboxID), len(m.SandboxID) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go index bec25c3a7c..e074f9df21 100644 --- a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go @@ -1,848 +1,342 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/snapshot.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type SnapshotPrepare struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -func (m *SnapshotPrepare) Reset() { *m = SnapshotPrepare{} } -func (*SnapshotPrepare) ProtoMessage() {} -func (*SnapshotPrepare) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{0} -} -func (m *SnapshotPrepare) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotPrepare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotPrepare.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SnapshotPrepare) Reset() { + *x = SnapshotPrepare{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SnapshotPrepare) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotPrepare.Merge(m, src) -} -func (m *SnapshotPrepare) XXX_Size() int { - return m.Size() -} -func (m *SnapshotPrepare) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotPrepare.DiscardUnknown(m) + +func (x *SnapshotPrepare) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_SnapshotPrepare proto.InternalMessageInfo +func (*SnapshotPrepare) ProtoMessage() {} + +func (x *SnapshotPrepare) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotPrepare.ProtoReflect.Descriptor instead. 
+func (*SnapshotPrepare) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{0} +} + +func (x *SnapshotPrepare) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SnapshotPrepare) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *SnapshotPrepare) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} type SnapshotCommit struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -func (m *SnapshotCommit) Reset() { *m = SnapshotCommit{} } -func (*SnapshotCommit) ProtoMessage() {} -func (*SnapshotCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{1} -} -func (m *SnapshotCommit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotCommit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SnapshotCommit) Reset() { + *x = SnapshotCommit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SnapshotCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotCommit.Merge(m, src) -} -func (m *SnapshotCommit) XXX_Size() int { - return m.Size() -} -func (m *SnapshotCommit) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotCommit.DiscardUnknown(m) + +func (x *SnapshotCommit) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_SnapshotCommit proto.InternalMessageInfo +func (*SnapshotCommit) ProtoMessage() {} + +func (x *SnapshotCommit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotCommit.ProtoReflect.Descriptor instead. 
+func (*SnapshotCommit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{1} +} + +func (x *SnapshotCommit) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SnapshotCommit) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SnapshotCommit) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} type SnapshotRemove struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` +} + +func (x *SnapshotRemove) Reset() { + *x = SnapshotRemove{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SnapshotRemove) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SnapshotRemove) Reset() { *m = SnapshotRemove{} } func (*SnapshotRemove) ProtoMessage() {} + +func (x *SnapshotRemove) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotRemove.ProtoReflect.Descriptor instead. 
func (*SnapshotRemove) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{2} -} -func (m *SnapshotRemove) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotRemove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotRemove.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotRemove) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotRemove.Merge(m, src) -} -func (m *SnapshotRemove) XXX_Size() int { - return m.Size() -} -func (m *SnapshotRemove) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotRemove.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_SnapshotRemove proto.InternalMessageInfo - -func init() { - proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare") - proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit") - proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove") +func (x *SnapshotRemove) GetKey() string { + if x != nil { + return x.Key + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptor_bd6c184d3d9aa5f2) +func (x *SnapshotRemove) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -var fileDescriptor_bd6c184d3d9aa5f2 = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7, - 0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54, - 0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7, - 0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8, - 0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01, - 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c, - 0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60, - 0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, - 0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9, - 0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, - 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3, - 0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00, - 0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00, -} +var File_github_com_containerd_containerd_api_events_snapshot_proto protoreflect.FileDescriptor -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - case "parent": - return string(m.Parent), len(m.Parent) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - } - return "", false -} -func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotPrepare) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotRemove) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - 
copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshot(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SnapshotPrepare) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotCommit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotRemove) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnapshot(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshot(x uint64) (n int) { - return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *SnapshotPrepare) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotPrepare{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SnapshotCommit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotCommit{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SnapshotRemove) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotRemove{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringSnapshot(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotCommit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotRemove) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnapshot(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnapshot - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnapshot - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnapshot - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, + 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5d, 0x0a, 0x0f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x72, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x22, 0x58, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 
0x74, 0x65, 0x72, 0x22, 0x44, 0x0a, 0x0e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, + 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( - ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData = file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes = []interface{}{ + (*SnapshotPrepare)(nil), // 0: containerd.events.SnapshotPrepare + (*SnapshotCommit)(nil), // 1: containerd.events.SnapshotCommit + (*SnapshotRemove)(nil), // 2: containerd.events.SnapshotRemove +} +var file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_snapshot_proto_init() } +func file_github_com_containerd_containerd_api_events_snapshot_proto_init() { + if File_github_com_containerd_containerd_api_events_snapshot_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotPrepare); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotCommit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*SnapshotRemove); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_snapshot_proto = out.File + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.proto b/vendor/github.com/containerd/containerd/api/events/snapshot.proto index eb1f06725c..effd869136 100644 --- a/vendor/github.com/containerd/containerd/api/events/snapshot.proto +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.proto @@ -18,7 +18,7 @@ syntax = "proto3"; package containerd.events; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; @@ -26,13 +26,16 @@ option (containerd.plugin.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; + string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; + string snapshotter = 5; } message SnapshotRemove { string key = 1; + string snapshotter = 5; } diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go new file mode 100644 index 0000000000..c89d1ab4a3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go @@ -0,0 +1,52 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/snapshot.proto +package events + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "parent": + return string(m.Parent), len(m.Parent) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "name": + return string(m.Name), len(m.Name) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go index f8f3a3f3d3..33fd521b6c 100644 --- a/vendor/github.com/containerd/containerd/api/events/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go @@ -1,3261 +1,1020 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/task.proto package events import ( - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type TaskCreate struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - IO *TaskIO `protobuf:"bytes,4,opt,name=io,proto3" json:"io,omitempty"` - Checkpoint string `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - Pid uint32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + IO *TaskIO `protobuf:"bytes,4,opt,name=io,proto3" json:"io,omitempty"` + Checkpoint string `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + Pid uint32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskCreate) Reset() { *m = TaskCreate{} } -func (*TaskCreate) ProtoMessage() {} -func (*TaskCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{0} -} -func (m *TaskCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskCreate) Reset() { + *x = TaskCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskCreate.Merge(m, src) -} -func (m *TaskCreate) XXX_Size() int { - return m.Size() -} -func (m *TaskCreate) XXX_DiscardUnknown() { - xxx_messageInfo_TaskCreate.DiscardUnknown(m) + +func (x *TaskCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskCreate proto.InternalMessageInfo +func (*TaskCreate) ProtoMessage() {} + +func (x *TaskCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskCreate.ProtoReflect.Descriptor instead. 
+func (*TaskCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{0} +} + +func (x *TaskCreate) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskCreate) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" +} + +func (x *TaskCreate) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *TaskCreate) GetIO() *TaskIO { + if x != nil { + return x.IO + } + return nil +} + +func (x *TaskCreate) GetCheckpoint() string { + if x != nil { + return x.Checkpoint + } + return "" +} + +func (x *TaskCreate) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type TaskStart struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskStart) Reset() { *m = TaskStart{} } -func (*TaskStart) ProtoMessage() {} -func (*TaskStart) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{1} -} -func (m *TaskStart) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskStart.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskStart) Reset() { + *x = TaskStart{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskStart) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskStart.Merge(m, src) -} -func (m *TaskStart) XXX_Size() int { - return m.Size() -} -func (m *TaskStart) XXX_DiscardUnknown() { - xxx_messageInfo_TaskStart.DiscardUnknown(m) + +func (x *TaskStart) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskStart proto.InternalMessageInfo +func (*TaskStart) ProtoMessage() {} + +func (x *TaskStart) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskStart.ProtoReflect.Descriptor instead. 
+func (*TaskStart) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{1} +} + +func (x *TaskStart) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskStart) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type TaskDelete struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. - ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` } -func (m *TaskDelete) Reset() { *m = TaskDelete{} } -func (*TaskDelete) ProtoMessage() {} -func (*TaskDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{2} -} -func (m *TaskDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskDelete) Reset() { + *x = TaskDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskDelete.Merge(m, src) -} -func (m *TaskDelete) XXX_Size() int { - return m.Size() -} -func (m *TaskDelete) XXX_DiscardUnknown() { - xxx_messageInfo_TaskDelete.DiscardUnknown(m) + +func (x *TaskDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskDelete proto.InternalMessageInfo +func (*TaskDelete) ProtoMessage() {} + +func (x *TaskDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskDelete.ProtoReflect.Descriptor instead. 
+func (*TaskDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{2} +} + +func (x *TaskDelete) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskDelete) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *TaskDelete) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *TaskDelete) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *TaskDelete) GetID() string { + if x != nil { + return x.ID + } + return "" +} type TaskIO struct { - Stdin string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Stdin string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` } -func (m *TaskIO) Reset() { *m = TaskIO{} } -func (*TaskIO) ProtoMessage() {} -func (*TaskIO) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{3} -} -func (m *TaskIO) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskIO) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskIO.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskIO) Reset() { + *x = TaskIO{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskIO) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskIO.Merge(m, src) -} -func (m *TaskIO) XXX_Size() int { - return m.Size() -} -func (m *TaskIO) XXX_DiscardUnknown() { - xxx_messageInfo_TaskIO.DiscardUnknown(m) + +func (x *TaskIO) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskIO proto.InternalMessageInfo +func (*TaskIO) ProtoMessage() {} + +func (x *TaskIO) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskIO.ProtoReflect.Descriptor instead. 
+func (*TaskIO) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{3} +} + +func (x *TaskIO) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *TaskIO) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *TaskIO) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *TaskIO) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} type TaskExit struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -func (m *TaskExit) Reset() { *m = TaskExit{} } -func (*TaskExit) ProtoMessage() {} -func (*TaskExit) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{4} -} -func (m *TaskExit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskExit) Reset() { + *x = TaskExit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskExit) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExit.Merge(m, src) -} -func (m *TaskExit) XXX_Size() int { - return m.Size() -} -func (m *TaskExit) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExit.DiscardUnknown(m) + +func (x *TaskExit) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskExit proto.InternalMessageInfo +func (*TaskExit) ProtoMessage() {} + +func (x *TaskExit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskExit.ProtoReflect.Descriptor instead. 
+func (*TaskExit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{4} +} + +func (x *TaskExit) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskExit) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *TaskExit) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *TaskExit) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *TaskExit) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} type TaskOOM struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskOOM) Reset() { *m = TaskOOM{} } -func (*TaskOOM) ProtoMessage() {} -func (*TaskOOM) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{5} -} -func (m *TaskOOM) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskOOM) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskOOM.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskOOM) Reset() { + *x = TaskOOM{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskOOM) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskOOM.Merge(m, src) -} -func (m *TaskOOM) XXX_Size() int { - return m.Size() -} -func (m *TaskOOM) XXX_DiscardUnknown() { - xxx_messageInfo_TaskOOM.DiscardUnknown(m) + +func (x *TaskOOM) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskOOM proto.InternalMessageInfo +func (*TaskOOM) ProtoMessage() {} + +func (x *TaskOOM) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskOOM.ProtoReflect.Descriptor instead. 
+func (*TaskOOM) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{5} +} + +func (x *TaskOOM) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type TaskExecAdded struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *TaskExecAdded) Reset() { *m = TaskExecAdded{} } -func (*TaskExecAdded) ProtoMessage() {} -func (*TaskExecAdded) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{6} -} -func (m *TaskExecAdded) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExecAdded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExecAdded.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskExecAdded) Reset() { + *x = TaskExecAdded{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskExecAdded) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExecAdded.Merge(m, src) -} -func (m *TaskExecAdded) XXX_Size() int { - return m.Size() -} -func (m *TaskExecAdded) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExecAdded.DiscardUnknown(m) + +func (x *TaskExecAdded) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskExecAdded proto.InternalMessageInfo +func (*TaskExecAdded) ProtoMessage() {} + +func (x *TaskExecAdded) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskExecAdded.ProtoReflect.Descriptor instead. 
+func (*TaskExecAdded) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{6} +} + +func (x *TaskExecAdded) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskExecAdded) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type TaskExecStarted struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskExecStarted) Reset() { *m = TaskExecStarted{} } -func (*TaskExecStarted) ProtoMessage() {} -func (*TaskExecStarted) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{7} -} -func (m *TaskExecStarted) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExecStarted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExecStarted.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskExecStarted) Reset() { + *x = TaskExecStarted{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskExecStarted) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExecStarted.Merge(m, src) -} -func (m *TaskExecStarted) XXX_Size() int { - return m.Size() -} -func (m *TaskExecStarted) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExecStarted.DiscardUnknown(m) + +func (x *TaskExecStarted) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskExecStarted proto.InternalMessageInfo +func (*TaskExecStarted) ProtoMessage() {} + +func (x *TaskExecStarted) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskExecStarted.ProtoReflect.Descriptor instead. 
+func (*TaskExecStarted) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{7} +} + +func (x *TaskExecStarted) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskExecStarted) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *TaskExecStarted) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type TaskPaused struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskPaused) Reset() { *m = TaskPaused{} } -func (*TaskPaused) ProtoMessage() {} -func (*TaskPaused) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{8} -} -func (m *TaskPaused) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskPaused) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskPaused.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskPaused) Reset() { + *x = TaskPaused{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskPaused) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskPaused.Merge(m, src) -} -func (m *TaskPaused) XXX_Size() int { - return m.Size() -} -func (m *TaskPaused) XXX_DiscardUnknown() { - xxx_messageInfo_TaskPaused.DiscardUnknown(m) + +func (x *TaskPaused) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskPaused proto.InternalMessageInfo +func (*TaskPaused) ProtoMessage() {} + +func (x *TaskPaused) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskPaused.ProtoReflect.Descriptor instead. 
+func (*TaskPaused) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{8} +} + +func (x *TaskPaused) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type TaskResumed struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskResumed) Reset() { *m = TaskResumed{} } -func (*TaskResumed) ProtoMessage() {} -func (*TaskResumed) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{9} -} -func (m *TaskResumed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskResumed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskResumed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskResumed) Reset() { + *x = TaskResumed{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskResumed) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskResumed.Merge(m, src) -} -func (m *TaskResumed) XXX_Size() int { - return m.Size() -} -func (m *TaskResumed) XXX_DiscardUnknown() { - xxx_messageInfo_TaskResumed.DiscardUnknown(m) + +func (x *TaskResumed) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_TaskResumed proto.InternalMessageInfo +func (*TaskResumed) ProtoMessage() {} + +func (x *TaskResumed) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskResumed.ProtoReflect.Descriptor instead. 
+func (*TaskResumed) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{9} +} + +func (x *TaskResumed) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type TaskCheckpointed struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Checkpoint string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Checkpoint string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` +} + +func (x *TaskCheckpointed) Reset() { + *x = TaskCheckpointed{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskCheckpointed) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskCheckpointed) Reset() { *m = TaskCheckpointed{} } func (*TaskCheckpointed) ProtoMessage() {} + +func (x *TaskCheckpointed) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskCheckpointed.ProtoReflect.Descriptor instead. 
func (*TaskCheckpointed) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{10} -} -func (m *TaskCheckpointed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskCheckpointed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskCheckpointed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TaskCheckpointed) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskCheckpointed.Merge(m, src) -} -func (m *TaskCheckpointed) XXX_Size() int { - return m.Size() -} -func (m *TaskCheckpointed) XXX_DiscardUnknown() { - xxx_messageInfo_TaskCheckpointed.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskCheckpointed proto.InternalMessageInfo - -func init() { - proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate") - proto.RegisterType((*TaskStart)(nil), "containerd.events.TaskStart") - proto.RegisterType((*TaskDelete)(nil), "containerd.events.TaskDelete") - proto.RegisterType((*TaskIO)(nil), "containerd.events.TaskIO") - proto.RegisterType((*TaskExit)(nil), "containerd.events.TaskExit") - proto.RegisterType((*TaskOOM)(nil), "containerd.events.TaskOOM") - proto.RegisterType((*TaskExecAdded)(nil), "containerd.events.TaskExecAdded") - proto.RegisterType((*TaskExecStarted)(nil), "containerd.events.TaskExecStarted") - proto.RegisterType((*TaskPaused)(nil), "containerd.events.TaskPaused") - proto.RegisterType((*TaskResumed)(nil), "containerd.events.TaskResumed") - proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptor_8db0813f7adfb63c) -} - -var fileDescriptor_8db0813f7adfb63c = []byte{ - // 644 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0x63, 0xa7, 0x75, 0xd3, 0x09, 0x55, 0x8b, 0x55, 0x95, 0x90, 0x83, 0x1d, 0x99, 0x4b, - 0x4e, 0xb6, 0x08, 0x12, 0x17, 0x84, 0xd4, 0xa4, 0xe1, 0x90, 0x43, 0x95, 0xe2, 0xf6, 0x50, 0x71, - 0x89, 0x36, 0xd9, 0x4d, 0xb2, 0x34, 0xf1, 0x5a, 0xf6, 0x18, 0x15, 0x89, 0x03, 0x8f, 0xc0, 0x23, - 0xf0, 0x38, 0x3d, 0x20, 0xc4, 0x91, 0x53, 0xa0, 0x7e, 0x00, 0x4e, 0x3c, 0x00, 0x5a, 0xaf, 0x93, - 0xb6, 0x54, 0x7c, 0x59, 0xe2, 0x94, 0x9d, 0xd9, 0xd9, 0xff, 0xec, 0xfc, 0x76, 0x3c, 0x81, 0xc7, - 0x13, 0x8e, 0xd3, 0x64, 0xe8, 0x8e, 0xc4, 0xdc, 0x1b, 0x89, 0x00, 0x09, 0x0f, 0x58, 0x44, 0xaf, - 0x2f, 0x49, 0xc8, 0x3d, 0xf6, 0x8a, 0x05, 0x18, 0x7b, 0x48, 0xe2, 0x33, 0x37, 0x8c, 0x04, 0x0a, - 0xf3, 0xee, 0x55, 0x84, 0xab, 0x76, 0xeb, 0xbb, 0x13, 0x31, 0x11, 0xd9, 0xae, 0x27, 0x57, 0x2a, - 0xb0, 0x6e, 0x4f, 0x84, 0x98, 0xcc, 0x98, 0x97, 0x59, 0xc3, 0x64, 0xec, 0x21, 0x9f, 0xb3, 0x18, - 0xc9, 0x3c, 0xcc, 0x03, 0xfe, 0xee, 0x06, 0xf8, 0x3a, 0x64, 0xb1, 0x37, 0x17, 0x49, 0x80, 0xf9, - 0xb9, 0xfd, 0x3f, 0x9e, 0x5b, 0xa5, 0x0c, 0x67, 0xc9, 0x84, 0x07, 0xde, 0x98, 0xb3, 0x19, 0x0d, - 0x09, 0x4e, 0x95, 0x82, 0xf3, 0x4d, 0x03, 0x38, 0x21, 0xf1, 0xd9, 0x41, 0xc4, 0x08, 0x32, 0xb3, - 0x05, 0x77, 0x56, 0x87, 0x07, 0x9c, 0xd6, 0xb4, 0x86, 0xd6, 0xdc, 0xec, 0x6c, 0xa7, 0x0b, 0xbb, - 0x7a, 0xb0, 0xf4, 0xf7, 0xba, 0x7e, 0x75, 0x15, 0xd4, 0xa3, 0xe6, 0x1e, 0x18, 0xc3, 0x24, 0xa0, - 0x33, 0x56, 0xd3, 0x65, 0xb4, 0x9f, 0x5b, 0xa6, 0x07, 0x46, 0x24, 0x04, 0x8e, 0xe3, 0x5a, 0xb9, - 0x51, 0x6e, 0x56, 0x5b, 0xf7, 0xdc, 0x6b, 0xbc, 0xb2, 0x5a, 
0xdc, 0x43, 0x59, 0x8b, 0x9f, 0x87, - 0x99, 0x0f, 0x41, 0xe7, 0xa2, 0xb6, 0xd6, 0xd0, 0x9a, 0xd5, 0xd6, 0x7d, 0xf7, 0x16, 0x5c, 0x57, - 0xde, 0xb3, 0xd7, 0xef, 0x18, 0xe9, 0xc2, 0xd6, 0x7b, 0x7d, 0x5f, 0xe7, 0xc2, 0xb4, 0x00, 0x46, - 0x53, 0x36, 0x3a, 0x0b, 0x05, 0x0f, 0xb0, 0xb6, 0x9e, 0xe5, 0xbf, 0xe6, 0x31, 0x77, 0xa0, 0x1c, - 0x72, 0x5a, 0x33, 0x1a, 0x5a, 0x73, 0xcb, 0x97, 0x4b, 0xe7, 0x39, 0x6c, 0x4a, 0x9d, 0x63, 0x24, - 0x11, 0x16, 0x2a, 0x37, 0x97, 0xd4, 0xaf, 0x24, 0x3f, 0xe6, 0x0c, 0xbb, 0x6c, 0xc6, 0x0a, 0x32, - 0xbc, 0x25, 0x6a, 0xda, 0x50, 0x65, 0xe7, 0x1c, 0x07, 0x31, 0x12, 0x4c, 0x24, 0x42, 0xb9, 0x03, - 0xd2, 0x75, 0x9c, 0x79, 0xcc, 0x36, 0x6c, 0x4a, 0x8b, 0xd1, 0x01, 0xc1, 0x1c, 0x5a, 0xdd, 0x55, - 0x8d, 0xe6, 0x2e, 0x5f, 0xdd, 0x3d, 0x59, 0x36, 0x5a, 0xa7, 0x72, 0xb1, 0xb0, 0x4b, 0xef, 0xbe, - 0xd8, 0x9a, 0x5f, 0x51, 0xc7, 0xda, 0x68, 0xee, 0x81, 0xce, 0xa9, 0xa2, 0x96, 0x53, 0xed, 0xfa, - 0x3a, 0xa7, 0xce, 0x4b, 0x30, 0x14, 0x6b, 0x73, 0x17, 0xd6, 0x63, 0xa4, 0x3c, 0x50, 0x45, 0xf8, - 0xca, 0x90, 0x2f, 0x1e, 0x23, 0x15, 0x09, 0x2e, 0x5f, 0x5c, 0x59, 0xb9, 0x9f, 0x45, 0x51, 0x76, - 0x5d, 0xe5, 0x67, 0x51, 0x64, 0xd6, 0xa1, 0x82, 0x2c, 0x9a, 0xf3, 0x80, 0xcc, 0xb2, 0x9b, 0x56, - 0xfc, 0x95, 0xed, 0x7c, 0xd0, 0xa0, 0x22, 0x93, 0x3d, 0x3b, 0xe7, 0x58, 0xb0, 0xfd, 0xf4, 0x9c, - 0xdc, 0x8d, 0x22, 0x96, 0x48, 0xcb, 0xbf, 0x44, 0xba, 0xf6, 0x7b, 0xa4, 0xeb, 0x45, 0x90, 0x3a, - 0x4f, 0x61, 0x43, 0x56, 0xd3, 0xef, 0x1f, 0x16, 0x29, 0xc6, 0x99, 0xc2, 0x96, 0x82, 0xc1, 0x46, - 0x6d, 0x4a, 0x19, 0x2d, 0x44, 0xe4, 0x01, 0x6c, 0xb0, 0x73, 0x36, 0x1a, 0xac, 0xb0, 0x40, 0xba, - 0xb0, 0x0d, 0xa9, 0xd9, 0xeb, 0xfa, 0x86, 0xdc, 0xea, 0x51, 0xe7, 0x0d, 0x6c, 0x2f, 0x33, 0x65, - 0xdf, 0xc2, 0x7f, 0xcc, 0x75, 0xfb, 0x29, 0x9c, 0x7d, 0xf5, 0xc5, 0x1c, 0x91, 0x24, 0x2e, 0x96, - 0xd8, 0x69, 0x43, 0x55, 0x2a, 0xf8, 0x2c, 0x4e, 0xe6, 0x05, 0x25, 0xc6, 0xb0, 0x93, 0x8d, 0xbe, - 0xd5, 0xb8, 0x28, 0xc8, 0xe0, 0xe6, 0x10, 0xd2, 0x7f, 0x1e, 0x42, 0x9d, 0xa3, 0x8b, 0x4b, 0xab, - 0xf4, 0xf9, 0xd2, 0x2a, 0xbd, 0x4d, 0x2d, 0xed, 0x22, 0xb5, 0xb4, 0x4f, 0xa9, 0xa5, 0x7d, 0x4d, - 0x2d, 0xed, 0xfd, 0x77, 0x4b, 0x7b, 0xd1, 0xfa, 0x87, 0x7f, 0x9f, 0x27, 0xea, 0xe7, 0xb4, 0x74, - 0x5a, 0x1e, 0x1a, 0x59, 0x47, 0x3e, 0xfa, 0x11, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0x0f, 0xec, - 0xbe, 0x06, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: rootfs - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "bundle": - return string(m.Bundle), len(m.Bundle) > 0 - case "io": - // NOTE(stevvooe): This is probably not correct in many cases. - // We assume that the target message also implements the Field - // method, which isn't likely true in a lot of cases. - // - // If you have a broken build and have found this comment, - // you may be closer to a solution. - if m.IO == nil { - return "", false - } - - return m.IO.Field(fieldpath[1:]) - case "checkpoint": - return string(m.Checkpoint), len(m.Checkpoint) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *TaskStart) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: pid - // unhandled: exit_status - // unhandled: exited_at - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "id": - return string(m.ID), len(m.ID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskIO) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "stdin": - return string(m.Stdin), len(m.Stdin) > 0 - case "stdout": - return string(m.Stdout), len(m.Stdout) > 0 - case "stderr": - return string(m.Stderr), len(m.Stderr) > 0 - case "terminal": - return fmt.Sprint(m.Terminal), true - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskExit) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: pid - // unhandled: exit_status - // unhandled: exited_at - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "id": - return string(m.ID), len(m.ID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskOOM) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "exec_id": - return string(m.ExecID), len(m.ExecID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "exec_id": - return string(m.ExecID), len(m.ExecID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *TaskPaused) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskResumed) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "checkpoint": - return string(m.Checkpoint), len(m.Checkpoint) > 0 - } - return "", false -} -func (m *TaskCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x30 - } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x2a - } - if m.IO != nil { - { - size, err := m.IO.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskStart) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskStart) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskStart) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = 
encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x2a - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintTask(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x18 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskIO) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskIO) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskIO) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x1a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x12 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskExit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskExit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskExit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintTask(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x2a - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) 
- i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskOOM) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskOOM) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskOOM) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskExecAdded) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskExecStarted) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskPaused) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskPaused) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskPaused) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskResumed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskResumed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskResumed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TaskCheckpointed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTask(dAtA []byte, offset int, v uint64) int { - offset -= sovTask(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *TaskCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovTask(uint64(l)) - } - } - if m.IO != nil { - l = m.IO.Size() - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskStart) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) 
- } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskIO) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskExit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskOOM) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskExecAdded) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskExecStarted) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskPaused) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskResumed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TaskCheckpointed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTask(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTask(x uint64) (n int) { - return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TaskCreate) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - 
repeatedStringForRootfs += "}" - s := strings.Join([]string{`&TaskCreate{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `IO:` + strings.Replace(this.IO.String(), "TaskIO", "TaskIO", 1) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskStart) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskStart{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskDelete{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskIO) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskIO{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExit{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskOOM) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskOOM{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExecAdded) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExecAdded{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExecStarted) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExecStarted{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskPaused) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&TaskPaused{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskResumed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskResumed{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskCheckpointed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskCheckpointed{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTask(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *TaskCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IO", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IO == nil { - m.IO = &TaskIO{} - } - if err := m.IO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskStart) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskStart: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskStart: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskIO) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskIO: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskIO: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := 
skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskOOM) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskOOM: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskOOM: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExecAdded) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExecAdded: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExecAdded: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExecStarted) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExecStarted: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExecStarted: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskPaused) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskPaused: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskPaused: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskResumed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskResumed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskResumed: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskCheckpointed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskCheckpointed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskCheckpointed: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTask(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTask - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTask - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTask - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{10} +} + +func (x *TaskCheckpointed) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskCheckpointed) GetCheckpoint() string { + if x != nil { + return x.Checkpoint + } + return "" +} + +var File_github_com_containerd_containerd_api_events_task_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_task_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x74, 0x61, + 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd5, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, + 0x73, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x4f, 0x52, 0x02, 0x69, 0x6f, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x40, + 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x22, 0xab, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6a, + 0x0a, 0x06, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x4f, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x22, 0xa9, 0x01, 0x0a, 0x08, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x69, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 
0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, + 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, + 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x2c, 0x0a, 0x07, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x4f, + 0x4d, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x49, 0x64, 0x22, 0x4b, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x41, 0x64, 0x64, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, + 0x64, 0x22, 0x5f, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, + 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x75, 0x73, 0x65, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x64, 0x22, 0x30, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x55, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x38, 0x5a, 0x32, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, + 
0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_task_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_task_proto_rawDescData = file_github_com_containerd_containerd_api_events_task_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_task_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_task_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_task_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_task_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_events_task_proto_goTypes = []interface{}{ + (*TaskCreate)(nil), // 0: containerd.events.TaskCreate + (*TaskStart)(nil), // 1: containerd.events.TaskStart + (*TaskDelete)(nil), // 2: containerd.events.TaskDelete + (*TaskIO)(nil), // 3: containerd.events.TaskIO + (*TaskExit)(nil), // 4: containerd.events.TaskExit + (*TaskOOM)(nil), // 5: containerd.events.TaskOOM + (*TaskExecAdded)(nil), // 6: containerd.events.TaskExecAdded + (*TaskExecStarted)(nil), // 7: containerd.events.TaskExecStarted + (*TaskPaused)(nil), // 8: containerd.events.TaskPaused + (*TaskResumed)(nil), // 9: containerd.events.TaskResumed + (*TaskCheckpointed)(nil), // 10: containerd.events.TaskCheckpointed + (*types.Mount)(nil), // 11: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_events_task_proto_depIdxs = []int32{ + 11, // 0: containerd.events.TaskCreate.rootfs:type_name -> containerd.types.Mount + 3, // 1: containerd.events.TaskCreate.io:type_name -> containerd.events.TaskIO + 12, // 2: containerd.events.TaskDelete.exited_at:type_name -> google.protobuf.Timestamp + 12, // 3: containerd.events.TaskExit.exited_at:type_name -> google.protobuf.Timestamp + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_task_proto_init() } +func file_github_com_containerd_containerd_api_events_task_proto_init() { + if File_github_com_containerd_containerd_api_events_task_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskStart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskIO); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskOOM); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecAdded); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecStarted); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskPaused); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskResumed); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskCheckpointed); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_task_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_task_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_task_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_task_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_task_proto = out.File + file_github_com_containerd_containerd_api_events_task_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_task_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_task_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/task.proto 
b/vendor/github.com/containerd/containerd/api/events/task.proto index 3cbbbf00a0..238564dfe2 100644 --- a/vendor/github.com/containerd/containerd/api/events/task.proto +++ b/vendor/github.com/containerd/containerd/api/events/task.proto @@ -18,10 +18,9 @@ syntax = "proto3"; package containerd.events; -import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; @@ -30,7 +29,7 @@ message TaskCreate { string container_id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; - TaskIO io = 4 [(gogoproto.customname) = "IO"]; + TaskIO io = 4; string checkpoint = 5; uint32 pid = 6; } @@ -44,7 +43,7 @@ message TaskDelete { string container_id = 1; uint32 pid = 2; uint32 exit_status = 3; - google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 4; // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. string id = 5; @@ -62,7 +61,7 @@ message TaskExit { string id = 2; uint32 pid = 3; uint32 exit_status = 4; - google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 5; } message TaskOOM { diff --git a/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go new file mode 100644 index 0000000000..633fc3a653 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/task.proto +package events + +import ( + fmt "fmt" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: rootfs + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "bundle": + return string(m.Bundle), len(m.Bundle) > 0 + case "io": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.IO == nil { + return "", false + } + return m.IO.Field(fieldpath[1:]) + case "checkpoint": + return string(m.Checkpoint), len(m.Checkpoint) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskStart) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. 
+// If the value is not defined, the second value will be false. +func (m *TaskDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: pid + // unhandled: exit_status + // unhandled: exited_at + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskIO) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "stdin": + return string(m.Stdin), len(m.Stdin) > 0 + case "stdout": + return string(m.Stdout), len(m.Stdout) > 0 + case "stderr": + return string(m.Stderr), len(m.Stderr) > 0 + case "terminal": + return fmt.Sprint(m.Terminal), true + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: pid + // unhandled: exit_status + // unhandled: exited_at + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskOOM) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "exec_id": + return string(m.ExecID), len(m.ExecID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "exec_id": + return string(m.ExecID), len(m.ExecID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskPaused) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *TaskResumed) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "checkpoint": + return string(m.Checkpoint), len(m.Checkpoint) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go new file mode 100644 index 0000000000..f960350c16 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go new file mode 100644 index 0000000000..5b3d4aa786 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go @@ -0,0 +1,1480 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto + +package sandbox + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundle_path,json=bundlePath,proto3" json:"bundle_path,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Options *anypb.Any `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + NetnsPath string `protobuf:"bytes,5,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"` +} + +func (x *CreateSandboxRequest) Reset() { + *x = CreateSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSandboxRequest) ProtoMessage() {} + +func (x *CreateSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSandboxRequest.ProtoReflect.Descriptor instead. +func (*CreateSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *CreateSandboxRequest) GetBundlePath() string { + if x != nil { + return x.BundlePath + } + return "" +} + +func (x *CreateSandboxRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateSandboxRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +func (x *CreateSandboxRequest) GetNetnsPath() string { + if x != nil { + return x.NetnsPath + } + return "" +} + +type CreateSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateSandboxResponse) Reset() { + *x = CreateSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSandboxResponse) ProtoMessage() {} + +func (x *CreateSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{1} +} + +type StartSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StartSandboxRequest) Reset() { + *x = StartSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartSandboxRequest) ProtoMessage() {} + +func (x *StartSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartSandboxRequest.ProtoReflect.Descriptor instead. +func (*StartSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *StartSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StartSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (x *StartSandboxResponse) Reset() { + *x = StartSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartSandboxResponse) ProtoMessage() {} + +func (x *StartSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*StartSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{3} +} + +func (x *StartSandboxResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StartSandboxResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +type PlatformRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *PlatformRequest) Reset() { + *x = PlatformRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformRequest) ProtoMessage() {} + +func (x *PlatformRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformRequest.ProtoReflect.Descriptor instead. +func (*PlatformRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{4} +} + +func (x *PlatformRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type PlatformResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` +} + +func (x *PlatformResponse) Reset() { + *x = PlatformResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformResponse) ProtoMessage() {} + +func (x *PlatformResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformResponse.ProtoReflect.Descriptor instead. 
+func (*PlatformResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{5} +} + +func (x *PlatformResponse) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +type StopSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + TimeoutSecs uint32 `protobuf:"varint,2,opt,name=timeout_secs,json=timeoutSecs,proto3" json:"timeout_secs,omitempty"` +} + +func (x *StopSandboxRequest) Reset() { + *x = StopSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopSandboxRequest) ProtoMessage() {} + +func (x *StopSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopSandboxRequest.ProtoReflect.Descriptor instead. +func (*StopSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{6} +} + +func (x *StopSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *StopSandboxRequest) GetTimeoutSecs() uint32 { + if x != nil { + return x.TimeoutSecs + } + return 0 +} + +type StopSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopSandboxResponse) Reset() { + *x = StopSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopSandboxResponse) ProtoMessage() {} + +func (x *StopSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*StopSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{7} +} + +type UpdateSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UpdateSandboxRequest) Reset() { + *x = UpdateSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSandboxRequest) ProtoMessage() {} + +func (x *UpdateSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSandboxRequest.ProtoReflect.Descriptor instead. +func (*UpdateSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *UpdateSandboxRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *UpdateSandboxRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +type WaitSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *WaitSandboxRequest) Reset() { + *x = WaitSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitSandboxRequest) ProtoMessage() {} + +func (x *WaitSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitSandboxRequest.ProtoReflect.Descriptor instead. 
+func (*WaitSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{9} +} + +func (x *WaitSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type WaitSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *WaitSandboxResponse) Reset() { + *x = WaitSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitSandboxResponse) ProtoMessage() {} + +func (x *WaitSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitSandboxResponse.ProtoReflect.Descriptor instead. +func (*WaitSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{10} +} + +func (x *WaitSandboxResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *WaitSandboxResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type UpdateSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateSandboxResponse) Reset() { + *x = UpdateSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSandboxResponse) ProtoMessage() {} + +func (x *UpdateSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{11} +} + +type SandboxStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` +} + +func (x *SandboxStatusRequest) Reset() { + *x = SandboxStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStatusRequest) ProtoMessage() {} + +func (x *SandboxStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStatusRequest.ProtoReflect.Descriptor instead. +func (*SandboxStatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{12} +} + +func (x *SandboxStatusRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *SandboxStatusRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +type SandboxStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + Info map[string]string `protobuf:"bytes,4,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + Extra *anypb.Any `protobuf:"bytes,7,opt,name=extra,proto3" json:"extra,omitempty"` +} + +func (x *SandboxStatusResponse) Reset() { + *x = SandboxStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStatusResponse) ProtoMessage() {} + +func (x *SandboxStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use SandboxStatusResponse.ProtoReflect.Descriptor instead. +func (*SandboxStatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{13} +} + +func (x *SandboxStatusResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *SandboxStatusResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *SandboxStatusResponse) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxStatusResponse) GetInfo() map[string]string { + if x != nil { + return x.Info + } + return nil +} + +func (x *SandboxStatusResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *SandboxStatusResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *SandboxStatusResponse) GetExtra() *anypb.Any { + if x != nil { + return x.Extra + } + return nil +} + +type PingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *PingRequest) Reset() { + *x = PingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingRequest) ProtoMessage() {} + +func (x *PingRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. +func (*PingRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{14} +} + +func (x *PingRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type PingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PingResponse) Reset() { + *x = PingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingResponse) ProtoMessage() {} + +func (x *PingResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. 
+func (*PingResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{15} +} + +type ShutdownSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ShutdownSandboxRequest) Reset() { + *x = ShutdownSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownSandboxRequest) ProtoMessage() {} + +func (x *ShutdownSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownSandboxRequest.ProtoReflect.Descriptor instead. +func (*ShutdownSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{16} +} + +func (x *ShutdownSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ShutdownSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShutdownSandboxResponse) Reset() { + *x = ShutdownSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownSandboxResponse) ProtoMessage() {} + +func (x *ShutdownSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*ShutdownSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{17} +} + +var File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2f, + 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, + 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x50, 0x61, 0x74, 0x68, 0x22, 0x17, + 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x34, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x63, 0x0a, + 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x10, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x22, 0x56, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x91, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x66, 0x0a, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x12, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x6f, 0x0a, 0x13, 0x57, 0x61, 0x69, 0x74, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, + 0x6f, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, 0x6f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x2c, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, + 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x37, 0x0a, 0x16, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, + 0x7a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x32, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x57, 0x61, 0x69, 0x74, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, + 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0b, 0x50, 0x69, 0x6e, + 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x80, 0x01, 0x0a, 
0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x3b, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes = []interface{}{ + (*CreateSandboxRequest)(nil), // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest + (*CreateSandboxResponse)(nil), // 1: containerd.runtime.sandbox.v1.CreateSandboxResponse + (*StartSandboxRequest)(nil), // 2: containerd.runtime.sandbox.v1.StartSandboxRequest + (*StartSandboxResponse)(nil), // 3: containerd.runtime.sandbox.v1.StartSandboxResponse + (*PlatformRequest)(nil), // 4: containerd.runtime.sandbox.v1.PlatformRequest + (*PlatformResponse)(nil), // 5: containerd.runtime.sandbox.v1.PlatformResponse + (*StopSandboxRequest)(nil), // 6: containerd.runtime.sandbox.v1.StopSandboxRequest + (*StopSandboxResponse)(nil), // 7: containerd.runtime.sandbox.v1.StopSandboxResponse + (*UpdateSandboxRequest)(nil), // 8: containerd.runtime.sandbox.v1.UpdateSandboxRequest + (*WaitSandboxRequest)(nil), // 9: containerd.runtime.sandbox.v1.WaitSandboxRequest + (*WaitSandboxResponse)(nil), // 10: containerd.runtime.sandbox.v1.WaitSandboxResponse + (*UpdateSandboxResponse)(nil), // 11: containerd.runtime.sandbox.v1.UpdateSandboxResponse + (*SandboxStatusRequest)(nil), // 12: containerd.runtime.sandbox.v1.SandboxStatusRequest + (*SandboxStatusResponse)(nil), // 13: containerd.runtime.sandbox.v1.SandboxStatusResponse + (*PingRequest)(nil), // 14: 
containerd.runtime.sandbox.v1.PingRequest + (*PingResponse)(nil), // 15: containerd.runtime.sandbox.v1.PingResponse + (*ShutdownSandboxRequest)(nil), // 16: containerd.runtime.sandbox.v1.ShutdownSandboxRequest + (*ShutdownSandboxResponse)(nil), // 17: containerd.runtime.sandbox.v1.ShutdownSandboxResponse + nil, // 18: containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry + nil, // 19: containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry + (*types.Mount)(nil), // 20: containerd.types.Mount + (*anypb.Any)(nil), // 21: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp + (*types.Platform)(nil), // 23: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs = []int32{ + 20, // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest.rootfs:type_name -> containerd.types.Mount + 21, // 1: containerd.runtime.sandbox.v1.CreateSandboxRequest.options:type_name -> google.protobuf.Any + 22, // 2: containerd.runtime.sandbox.v1.StartSandboxResponse.created_at:type_name -> google.protobuf.Timestamp + 23, // 3: containerd.runtime.sandbox.v1.PlatformResponse.platform:type_name -> containerd.types.Platform + 21, // 4: containerd.runtime.sandbox.v1.UpdateSandboxRequest.resources:type_name -> google.protobuf.Any + 18, // 5: containerd.runtime.sandbox.v1.UpdateSandboxRequest.annotations:type_name -> containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry + 22, // 6: containerd.runtime.sandbox.v1.WaitSandboxResponse.exited_at:type_name -> google.protobuf.Timestamp + 19, // 7: containerd.runtime.sandbox.v1.SandboxStatusResponse.info:type_name -> containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry + 22, // 8: containerd.runtime.sandbox.v1.SandboxStatusResponse.created_at:type_name -> google.protobuf.Timestamp + 22, // 9: containerd.runtime.sandbox.v1.SandboxStatusResponse.exited_at:type_name -> google.protobuf.Timestamp + 21, // 10: containerd.runtime.sandbox.v1.SandboxStatusResponse.extra:type_name -> google.protobuf.Any + 0, // 11: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:input_type -> containerd.runtime.sandbox.v1.CreateSandboxRequest + 2, // 12: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:input_type -> containerd.runtime.sandbox.v1.StartSandboxRequest + 4, // 13: containerd.runtime.sandbox.v1.Sandbox.Platform:input_type -> containerd.runtime.sandbox.v1.PlatformRequest + 6, // 14: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:input_type -> containerd.runtime.sandbox.v1.StopSandboxRequest + 9, // 15: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:input_type -> containerd.runtime.sandbox.v1.WaitSandboxRequest + 12, // 16: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:input_type -> containerd.runtime.sandbox.v1.SandboxStatusRequest + 14, // 17: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:input_type -> containerd.runtime.sandbox.v1.PingRequest + 16, // 18: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:input_type -> containerd.runtime.sandbox.v1.ShutdownSandboxRequest + 1, // 19: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:output_type -> containerd.runtime.sandbox.v1.CreateSandboxResponse + 3, // 20: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:output_type -> containerd.runtime.sandbox.v1.StartSandboxResponse + 5, // 21: containerd.runtime.sandbox.v1.Sandbox.Platform:output_type -> containerd.runtime.sandbox.v1.PlatformResponse + 7, // 22: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:output_type -> 
containerd.runtime.sandbox.v1.StopSandboxResponse + 10, // 23: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:output_type -> containerd.runtime.sandbox.v1.WaitSandboxResponse + 13, // 24: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:output_type -> containerd.runtime.sandbox.v1.SandboxStatusResponse + 15, // 25: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:output_type -> containerd.runtime.sandbox.v1.PingResponse + 17, // 26: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:output_type -> containerd.runtime.sandbox.v1.ShutdownSandboxResponse + 19, // [19:27] is the sub-list for method output_type + 11, // [11:19] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopSandboxResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 20, + NumExtensions: 0, + NumServices: 
1, + }, + GoTypes: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto = out.File + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto new file mode 100644 index 0000000000..a051f3ea35 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto @@ -0,0 +1,136 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.runtime.sandbox.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/platform.proto"; + +option go_package = "github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox"; + +// Sandbox is an optional interface that shim may implement to support sandboxes environments. +// A typical example of sandbox is microVM or pause container - an entity that groups containers and/or +// holds resources relevant for this group. +service Sandbox { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse); + + // StartSandbox will start previsouly created sandbox. + rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse); + + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec. + rpc Platform(PlatformRequest) returns (PlatformResponse); + + // StopSandbox will stop existing sandbox instance + rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse); + + // WaitSandbox blocks until sanbox exits. + rpc WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse); + + // SandboxStatus will return current status of the running sandbox instance + rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse); + + // PingSandbox is a lightweight API call to check whether sandbox alive. + rpc PingSandbox(PingRequest) returns (PingResponse); + + // ShutdownSandbox must shutdown shim instance. 
+ rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse); +} + +message CreateSandboxRequest { + string sandbox_id = 1; + string bundle_path = 2; + repeated containerd.types.Mount rootfs = 3; + google.protobuf.Any options = 4; + string netns_path = 5; +} + +message CreateSandboxResponse {} + +message StartSandboxRequest { + string sandbox_id = 1; +} + +message StartSandboxResponse { + uint32 pid = 1; + google.protobuf.Timestamp created_at = 2; +} + +message PlatformRequest { + string sandbox_id = 1; +} + +message PlatformResponse { + containerd.types.Platform platform = 1; +} + +message StopSandboxRequest { + string sandbox_id = 1; + uint32 timeout_secs = 2; +} + +message StopSandboxResponse {} + +message UpdateSandboxRequest { + string sandbox_id = 1; + google.protobuf.Any resources = 2; + map annotations = 3; +} + +message WaitSandboxRequest { + string sandbox_id = 1; +} + +message WaitSandboxResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2; +} + +message UpdateSandboxResponse {} + +message SandboxStatusRequest { + string sandbox_id = 1; + bool verbose = 2; +} + +message SandboxStatusResponse { + string sandbox_id = 1; + uint32 pid = 2; + string state = 3; + map info = 4; + google.protobuf.Timestamp created_at = 5; + google.protobuf.Timestamp exited_at = 6; + google.protobuf.Any extra = 7; +} + +message PingRequest { + string sandbox_id = 1; +} + +message PingResponse {} + +message ShutdownSandboxRequest { + string sandbox_id = 1; +} + +message ShutdownSandboxResponse {} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go new file mode 100644 index 0000000000..f794249861 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go @@ -0,0 +1,377 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto + +package sandbox + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// SandboxClient is the client API for Sandbox service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SandboxClient interface { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + CreateSandbox(ctx context.Context, in *CreateSandboxRequest, opts ...grpc.CallOption) (*CreateSandboxResponse, error) + // StartSandbox will start previsouly created sandbox. + StartSandbox(ctx context.Context, in *StartSandboxRequest, opts ...grpc.CallOption) (*StartSandboxResponse, error) + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec. 
+ Platform(ctx context.Context, in *PlatformRequest, opts ...grpc.CallOption) (*PlatformResponse, error) + // StopSandbox will stop existing sandbox instance + StopSandbox(ctx context.Context, in *StopSandboxRequest, opts ...grpc.CallOption) (*StopSandboxResponse, error) + // WaitSandbox blocks until sanbox exits. + WaitSandbox(ctx context.Context, in *WaitSandboxRequest, opts ...grpc.CallOption) (*WaitSandboxResponse, error) + // SandboxStatus will return current status of the running sandbox instance + SandboxStatus(ctx context.Context, in *SandboxStatusRequest, opts ...grpc.CallOption) (*SandboxStatusResponse, error) + // PingSandbox is a lightweight API call to check whether sandbox alive. + PingSandbox(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) + // ShutdownSandbox must shutdown shim instance. + ShutdownSandbox(ctx context.Context, in *ShutdownSandboxRequest, opts ...grpc.CallOption) (*ShutdownSandboxResponse, error) +} + +type sandboxClient struct { + cc grpc.ClientConnInterface +} + +func NewSandboxClient(cc grpc.ClientConnInterface) SandboxClient { + return &sandboxClient{cc} +} + +func (c *sandboxClient) CreateSandbox(ctx context.Context, in *CreateSandboxRequest, opts ...grpc.CallOption) (*CreateSandboxResponse, error) { + out := new(CreateSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/CreateSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) StartSandbox(ctx context.Context, in *StartSandboxRequest, opts ...grpc.CallOption) (*StartSandboxResponse, error) { + out := new(StartSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/StartSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) Platform(ctx context.Context, in *PlatformRequest, opts ...grpc.CallOption) (*PlatformResponse, error) { + out := new(PlatformResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/Platform", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) StopSandbox(ctx context.Context, in *StopSandboxRequest, opts ...grpc.CallOption) (*StopSandboxResponse, error) { + out := new(StopSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/StopSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) WaitSandbox(ctx context.Context, in *WaitSandboxRequest, opts ...grpc.CallOption) (*WaitSandboxResponse, error) { + out := new(WaitSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/WaitSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) SandboxStatus(ctx context.Context, in *SandboxStatusRequest, opts ...grpc.CallOption) (*SandboxStatusResponse, error) { + out := new(SandboxStatusResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/SandboxStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) PingSandbox(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { + out := new(PingResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/PingSandbox", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) ShutdownSandbox(ctx context.Context, in *ShutdownSandboxRequest, opts ...grpc.CallOption) (*ShutdownSandboxResponse, error) { + out := new(ShutdownSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/ShutdownSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SandboxServer is the server API for Sandbox service. +// All implementations must embed UnimplementedSandboxServer +// for forward compatibility +type SandboxServer interface { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) + // StartSandbox will start previsouly created sandbox. + StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec. + Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) + // StopSandbox will stop existing sandbox instance + StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) + // WaitSandbox blocks until sanbox exits. + WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) + // SandboxStatus will return current status of the running sandbox instance + SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) + // PingSandbox is a lightweight API call to check whether sandbox alive. + PingSandbox(context.Context, *PingRequest) (*PingResponse, error) + // ShutdownSandbox must shutdown shim instance. + ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) + mustEmbedUnimplementedSandboxServer() +} + +// UnimplementedSandboxServer must be embedded to have forward compatible implementations. 
+type UnimplementedSandboxServer struct { +} + +func (UnimplementedSandboxServer) CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSandbox not implemented") +} +func (UnimplementedSandboxServer) StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartSandbox not implemented") +} +func (UnimplementedSandboxServer) Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Platform not implemented") +} +func (UnimplementedSandboxServer) StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopSandbox not implemented") +} +func (UnimplementedSandboxServer) WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WaitSandbox not implemented") +} +func (UnimplementedSandboxServer) SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SandboxStatus not implemented") +} +func (UnimplementedSandboxServer) PingSandbox(context.Context, *PingRequest) (*PingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PingSandbox not implemented") +} +func (UnimplementedSandboxServer) ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShutdownSandbox not implemented") +} +func (UnimplementedSandboxServer) mustEmbedUnimplementedSandboxServer() {} + +// UnsafeSandboxServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SandboxServer will +// result in compilation errors. 
+type UnsafeSandboxServer interface { + mustEmbedUnimplementedSandboxServer() +} + +func RegisterSandboxServer(s grpc.ServiceRegistrar, srv SandboxServer) { + s.RegisterService(&Sandbox_ServiceDesc, srv) +} + +func _Sandbox_CreateSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).CreateSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/CreateSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).CreateSandbox(ctx, req.(*CreateSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_StartSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).StartSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/StartSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).StartSandbox(ctx, req.(*StartSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_Platform_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlatformRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).Platform(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/Platform", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).Platform(ctx, req.(*PlatformRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_StopSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).StopSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/StopSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).StopSandbox(ctx, req.(*StopSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_WaitSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).WaitSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/WaitSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).WaitSandbox(ctx, req.(*WaitSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_SandboxStatus_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SandboxStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).SandboxStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/SandboxStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).SandboxStatus(ctx, req.(*SandboxStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_PingSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).PingSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/PingSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).PingSandbox(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_ShutdownSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).ShutdownSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/ShutdownSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).ShutdownSandbox(ctx, req.(*ShutdownSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Sandbox_ServiceDesc is the grpc.ServiceDesc for Sandbox service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Sandbox_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.runtime.sandbox.v1.Sandbox", + HandlerType: (*SandboxServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSandbox", + Handler: _Sandbox_CreateSandbox_Handler, + }, + { + MethodName: "StartSandbox", + Handler: _Sandbox_StartSandbox_Handler, + }, + { + MethodName: "Platform", + Handler: _Sandbox_Platform_Handler, + }, + { + MethodName: "StopSandbox", + Handler: _Sandbox_StopSandbox_Handler, + }, + { + MethodName: "WaitSandbox", + Handler: _Sandbox_WaitSandbox_Handler, + }, + { + MethodName: "SandboxStatus", + Handler: _Sandbox_SandboxStatus_Handler, + }, + { + MethodName: "PingSandbox", + Handler: _Sandbox_PingSandbox_Handler, + }, + { + MethodName: "ShutdownSandbox", + Handler: _Sandbox_ShutdownSandbox_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go new file mode 100644 index 0000000000..d935fe611f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto +package sandbox + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" +) + +type TTRPCSandboxService interface { + CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) + StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) + Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) + StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) + WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) + SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) + PingSandbox(context.Context, *PingRequest) (*PingResponse, error) + ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) +} + +func RegisterTTRPCSandboxService(srv *ttrpc.Server, svc TTRPCSandboxService) { + srv.RegisterService("containerd.runtime.sandbox.v1.Sandbox", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "CreateSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CreateSandbox(ctx, &req) + }, + "StartSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StartSandbox(ctx, &req) + }, + "Platform": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PlatformRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Platform(ctx, &req) + }, + "StopSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StopSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StopSandbox(ctx, &req) + }, + "WaitSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.WaitSandbox(ctx, &req) + }, + "SandboxStatus": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req SandboxStatusRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.SandboxStatus(ctx, &req) + }, + "PingSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PingRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.PingSandbox(ctx, &req) + }, + "ShutdownSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ShutdownSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ShutdownSandbox(ctx, &req) + }, + }, + }) +} + +type ttrpcsandboxClient struct { + client *ttrpc.Client +} + +func NewTTRPCSandboxClient(client *ttrpc.Client) TTRPCSandboxService { + return &ttrpcsandboxClient{ + client: client, + } +} + +func (c *ttrpcsandboxClient) CreateSandbox(ctx context.Context, req *CreateSandboxRequest) (*CreateSandboxResponse, error) { + var resp CreateSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "CreateSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) StartSandbox(ctx 
context.Context, req *StartSandboxRequest) (*StartSandboxResponse, error) { + var resp StartSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "StartSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) Platform(ctx context.Context, req *PlatformRequest) (*PlatformResponse, error) { + var resp PlatformResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "Platform", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) StopSandbox(ctx context.Context, req *StopSandboxRequest) (*StopSandboxResponse, error) { + var resp StopSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "StopSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) WaitSandbox(ctx context.Context, req *WaitSandboxRequest) (*WaitSandboxResponse, error) { + var resp WaitSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "WaitSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) SandboxStatus(ctx context.Context, req *SandboxStatusRequest) (*SandboxStatusResponse, error) { + var resp SandboxStatusResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "SandboxStatus", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) PingSandbox(ctx context.Context, req *PingRequest) (*PingResponse, error) { + var resp PingResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "PingSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) ShutdownSandbox(ctx context.Context, req *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) { + var resp ShutdownSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "ShutdownSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/doc.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/doc.go similarity index 100% rename from vendor/github.com/containerd/containerd/runtime/v2/task/doc.go rename to vendor/github.com/containerd/containerd/api/runtime/task/v2/doc.go diff --git a/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go new file mode 100644 index 0000000000..383e29db40 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go @@ -0,0 +1,2338 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/task/v2/shim.proto + +package task + +import ( + types "github.com/containerd/containerd/api/types" + task "github.com/containerd/containerd/api/types/task" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *CreateTaskRequest) Reset() { + *x = CreateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTaskRequest) ProtoMessage() {} + +func (x *CreateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CreateTaskRequest) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" +} + +func (x *CreateTaskRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateTaskRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CreateTaskRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *CreateTaskRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *CreateTaskRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *CreateTaskRequest) GetCheckpoint() string { + if x != nil { + return x.Checkpoint + } + return "" +} + +func (x *CreateTaskRequest) GetParentCheckpoint() string { + if x != nil { + return x.ParentCheckpoint + } + return "" +} + +func (x *CreateTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type CreateTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *CreateTaskResponse) Reset() { + *x = CreateTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTaskResponse) ProtoMessage() {} + +func (x *CreateTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskResponse.ProtoReflect.Descriptor instead. 
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateTaskResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{2} +} + +func (x *DeleteRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *DeleteRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type DeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *DeleteResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *DeleteResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type ExecProcessRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Spec *anypb.Any `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` +} + +func (x *ExecProcessRequest) Reset() { + *x = ExecProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecProcessRequest) ProtoMessage() {} + +func (x *ExecProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessRequest.ProtoReflect.Descriptor instead. 
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{4} +} + +func (x *ExecProcessRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ExecProcessRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *ExecProcessRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *ExecProcessRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *ExecProcessRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *ExecProcessRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *ExecProcessRequest) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +type ExecProcessResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ExecProcessResponse) Reset() { + *x = ExecProcessResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecProcessResponse) ProtoMessage() {} + +func (x *ExecProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessResponse.ProtoReflect.Descriptor instead. +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{5} +} + +type ResizePtyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *ResizePtyRequest) Reset() { + *x = ResizePtyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResizePtyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResizePtyRequest) ProtoMessage() {} + +func (x *ResizePtyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResizePtyRequest.ProtoReflect.Descriptor instead. 
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{6} +} + +func (x *ResizePtyRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ResizePtyRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *ResizePtyRequest) GetWidth() uint32 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *ResizePtyRequest) GetHeight() uint32 { + if x != nil { + return x.Height + } + return 0 +} + +type StateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StateRequest) Reset() { + *x = StateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateRequest) ProtoMessage() {} + +func (x *StateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateRequest.ProtoReflect.Descriptor instead. +func (*StateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{7} +} + +func (x *StateRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StateRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type StateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + ExecID string `protobuf:"bytes,11,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StateResponse) Reset() { + *x = StateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*StateResponse) ProtoMessage() {} + +func (x *StateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateResponse.ProtoReflect.Descriptor instead. +func (*StateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{8} +} + +func (x *StateResponse) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StateResponse) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" +} + +func (x *StateResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StateResponse) GetStatus() task.Status { + if x != nil { + return x.Status + } + return task.Status(0) +} + +func (x *StateResponse) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *StateResponse) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *StateResponse) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *StateResponse) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *StateResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *StateResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *StateResponse) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type KillRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` +} + +func (x *KillRequest) Reset() { + *x = KillRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KillRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillRequest) ProtoMessage() {} + +func (x *KillRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillRequest.ProtoReflect.Descriptor instead. 
+func (*KillRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{9} +} + +func (x *KillRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *KillRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *KillRequest) GetSignal() uint32 { + if x != nil { + return x.Signal + } + return 0 +} + +func (x *KillRequest) GetAll() bool { + if x != nil { + return x.All + } + return false +} + +type CloseIORequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` +} + +func (x *CloseIORequest) Reset() { + *x = CloseIORequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloseIORequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseIORequest) ProtoMessage() {} + +func (x *CloseIORequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseIORequest.ProtoReflect.Descriptor instead. +func (*CloseIORequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{10} +} + +func (x *CloseIORequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CloseIORequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *CloseIORequest) GetStdin() bool { + if x != nil { + return x.Stdin + } + return false +} + +type PidsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *PidsRequest) Reset() { + *x = PidsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsRequest) ProtoMessage() {} + +func (x *PidsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsRequest.ProtoReflect.Descriptor instead. 
+func (*PidsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{11} +} + +func (x *PidsRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type PidsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` +} + +func (x *PidsResponse) Reset() { + *x = PidsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsResponse) ProtoMessage() {} + +func (x *PidsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsResponse.ProtoReflect.Descriptor instead. +func (*PidsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{12} +} + +func (x *PidsResponse) GetProcesses() []*task.ProcessInfo { + if x != nil { + return x.Processes + } + return nil +} + +type CheckpointTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *CheckpointTaskRequest) Reset() { + *x = CheckpointTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckpointTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckpointTaskRequest) ProtoMessage() {} + +func (x *CheckpointTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{13} +} + +func (x *CheckpointTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CheckpointTaskRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CheckpointTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type UpdateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UpdateTaskRequest) Reset() { + *x = UpdateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTaskRequest) ProtoMessage() {} + +func (x *UpdateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTaskRequest.ProtoReflect.Descriptor instead. +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{14} +} + +func (x *UpdateTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *UpdateTaskRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *UpdateTaskRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +type StartRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StartRequest) Reset() { + *x = StartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartRequest) ProtoMessage() {} + +func (x *StartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. 
+func (*StartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{15} +} + +func (x *StartRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StartRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type StartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *StartResponse) Reset() { + *x = StartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResponse) ProtoMessage() {} + +func (x *StartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. +func (*StartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{16} +} + +func (x *StartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +type WaitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *WaitRequest) Reset() { + *x = WaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitRequest) ProtoMessage() {} + +func (x *WaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. 
+func (*WaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{17} +} + +func (x *WaitRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *WaitRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type WaitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *WaitResponse) Reset() { + *x = WaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitResponse) ProtoMessage() {} + +func (x *WaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitResponse.ProtoReflect.Descriptor instead. +func (*WaitResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{18} +} + +func (x *WaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *WaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type StatsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *StatsRequest) Reset() { + *x = StatsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsRequest) ProtoMessage() {} + +func (x *StatsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsRequest.ProtoReflect.Descriptor instead. 
+func (*StatsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{19} +} + +func (x *StatsRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type StatsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Stats *anypb.Any `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` +} + +func (x *StatsResponse) Reset() { + *x = StatsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsResponse) ProtoMessage() {} + +func (x *StatsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsResponse.ProtoReflect.Descriptor instead. +func (*StatsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{20} +} + +func (x *StatsResponse) GetStats() *anypb.Any { + if x != nil { + return x.Stats + } + return nil +} + +type ConnectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ConnectRequest) Reset() { + *x = ConnectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectRequest) ProtoMessage() {} + +func (x *ConnectRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectRequest.ProtoReflect.Descriptor instead. 
+func (*ConnectRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{21} +} + +func (x *ConnectRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type ConnectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` + TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ConnectResponse) Reset() { + *x = ConnectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectResponse) ProtoMessage() {} + +func (x *ConnectResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectResponse.ProtoReflect.Descriptor instead. +func (*ConnectResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{22} +} + +func (x *ConnectResponse) GetShimPid() uint32 { + if x != nil { + return x.ShimPid + } + return 0 +} + +func (x *ConnectResponse) GetTaskPid() uint32 { + if x != nil { + return x.TaskPid + } + return 0 +} + +func (x *ConnectResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +type ShutdownRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` +} + +func (x *ShutdownRequest) Reset() { + *x = ShutdownRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownRequest) ProtoMessage() {} + +func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{23} +} + +func (x *ShutdownRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ShutdownRequest) GetNow() bool { + if x != nil { + return x.Now + } + return false +} + +type PauseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *PauseRequest) Reset() { + *x = PauseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PauseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PauseRequest) ProtoMessage() {} + +func (x *PauseRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseRequest.ProtoReflect.Descriptor instead. +func (*PauseRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{24} +} + +func (x *PauseRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type ResumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ResumeRequest) Reset() { + *x = ResumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumeRequest) ProtoMessage() {} + +func (x *ResumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeRequest.ProtoReflect.Descriptor instead. 
+func (*ResumeRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{25} +} + +func (x *ResumeRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +var File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc = []byte{ + 0x0a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x74, + 0x61, 0x73, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x68, 0x69, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xcb, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, + 0x65, 0x72, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x68, 0x65, 
0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x26, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, + 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x64, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, + 0x72, 0x72, 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0x15, 0x0a, 0x13, + 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 
0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x37, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, + 0x70, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x60, 0x0a, + 0x0b, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, + 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, + 0x03, 0x61, 
0x6c, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, + 0x4f, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, + 0x22, 0x1d, 0x0a, 0x0b, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, + 0x4e, 0x0a, 0x0c, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, + 0x6b, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf1, 0x01, 0x0a, + 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x37, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x0b, + 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, + 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, + 0x65, 0x63, 0x49, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x1e, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3b, + 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2a, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x22, 0x20, 0x0a, 0x0e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x61, 0x0a, + 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x69, 0x6d, 0x50, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, + 0x61, 0x73, 0x6b, 0x50, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x33, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x1e, 0x0a, 0x0c, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x1f, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0x8a, 0x0a, 0x0a, 
0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, + 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x21, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x50, 0x69, 0x64, 0x73, 0x12, 0x1f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x41, 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x0a, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 
0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x04, 0x4b, 0x69, 0x6c, + 0x6c, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x04, 0x45, 0x78, + 0x65, 0x63, 0x12, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x49, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x12, + 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x45, 0x0a, + 0x07, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x12, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x47, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x25, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x49, 0x0a, + 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x12, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 
0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x76, 0x32, 0x3b, 0x74, 0x61, 0x73, 0x6b, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData = file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes = []interface{}{ + (*CreateTaskRequest)(nil), // 0: containerd.task.v2.CreateTaskRequest + (*CreateTaskResponse)(nil), // 1: containerd.task.v2.CreateTaskResponse + (*DeleteRequest)(nil), // 2: containerd.task.v2.DeleteRequest + (*DeleteResponse)(nil), // 3: containerd.task.v2.DeleteResponse + (*ExecProcessRequest)(nil), // 4: containerd.task.v2.ExecProcessRequest + (*ExecProcessResponse)(nil), // 5: containerd.task.v2.ExecProcessResponse + (*ResizePtyRequest)(nil), // 6: containerd.task.v2.ResizePtyRequest + (*StateRequest)(nil), // 7: containerd.task.v2.StateRequest + (*StateResponse)(nil), // 8: containerd.task.v2.StateResponse + (*KillRequest)(nil), // 9: containerd.task.v2.KillRequest + (*CloseIORequest)(nil), // 10: containerd.task.v2.CloseIORequest + (*PidsRequest)(nil), // 11: containerd.task.v2.PidsRequest + (*PidsResponse)(nil), // 12: containerd.task.v2.PidsResponse + (*CheckpointTaskRequest)(nil), // 13: containerd.task.v2.CheckpointTaskRequest + (*UpdateTaskRequest)(nil), // 14: containerd.task.v2.UpdateTaskRequest + (*StartRequest)(nil), // 15: containerd.task.v2.StartRequest + (*StartResponse)(nil), // 16: containerd.task.v2.StartResponse + (*WaitRequest)(nil), // 17: containerd.task.v2.WaitRequest + (*WaitResponse)(nil), // 18: containerd.task.v2.WaitResponse + (*StatsRequest)(nil), // 19: containerd.task.v2.StatsRequest + (*StatsResponse)(nil), 
// 20: containerd.task.v2.StatsResponse + (*ConnectRequest)(nil), // 21: containerd.task.v2.ConnectRequest + (*ConnectResponse)(nil), // 22: containerd.task.v2.ConnectResponse + (*ShutdownRequest)(nil), // 23: containerd.task.v2.ShutdownRequest + (*PauseRequest)(nil), // 24: containerd.task.v2.PauseRequest + (*ResumeRequest)(nil), // 25: containerd.task.v2.ResumeRequest + nil, // 26: containerd.task.v2.UpdateTaskRequest.AnnotationsEntry + (*types.Mount)(nil), // 27: containerd.types.Mount + (*anypb.Any)(nil), // 28: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp + (task.Status)(0), // 30: containerd.v1.types.Status + (*task.ProcessInfo)(nil), // 31: containerd.v1.types.ProcessInfo + (*emptypb.Empty)(nil), // 32: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs = []int32{ + 27, // 0: containerd.task.v2.CreateTaskRequest.rootfs:type_name -> containerd.types.Mount + 28, // 1: containerd.task.v2.CreateTaskRequest.options:type_name -> google.protobuf.Any + 29, // 2: containerd.task.v2.DeleteResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 3: containerd.task.v2.ExecProcessRequest.spec:type_name -> google.protobuf.Any + 30, // 4: containerd.task.v2.StateResponse.status:type_name -> containerd.v1.types.Status + 29, // 5: containerd.task.v2.StateResponse.exited_at:type_name -> google.protobuf.Timestamp + 31, // 6: containerd.task.v2.PidsResponse.processes:type_name -> containerd.v1.types.ProcessInfo + 28, // 7: containerd.task.v2.CheckpointTaskRequest.options:type_name -> google.protobuf.Any + 28, // 8: containerd.task.v2.UpdateTaskRequest.resources:type_name -> google.protobuf.Any + 26, // 9: containerd.task.v2.UpdateTaskRequest.annotations:type_name -> containerd.task.v2.UpdateTaskRequest.AnnotationsEntry + 29, // 10: containerd.task.v2.WaitResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 11: containerd.task.v2.StatsResponse.stats:type_name -> google.protobuf.Any + 7, // 12: containerd.task.v2.Task.State:input_type -> containerd.task.v2.StateRequest + 0, // 13: containerd.task.v2.Task.Create:input_type -> containerd.task.v2.CreateTaskRequest + 15, // 14: containerd.task.v2.Task.Start:input_type -> containerd.task.v2.StartRequest + 2, // 15: containerd.task.v2.Task.Delete:input_type -> containerd.task.v2.DeleteRequest + 11, // 16: containerd.task.v2.Task.Pids:input_type -> containerd.task.v2.PidsRequest + 24, // 17: containerd.task.v2.Task.Pause:input_type -> containerd.task.v2.PauseRequest + 25, // 18: containerd.task.v2.Task.Resume:input_type -> containerd.task.v2.ResumeRequest + 13, // 19: containerd.task.v2.Task.Checkpoint:input_type -> containerd.task.v2.CheckpointTaskRequest + 9, // 20: containerd.task.v2.Task.Kill:input_type -> containerd.task.v2.KillRequest + 4, // 21: containerd.task.v2.Task.Exec:input_type -> containerd.task.v2.ExecProcessRequest + 6, // 22: containerd.task.v2.Task.ResizePty:input_type -> containerd.task.v2.ResizePtyRequest + 10, // 23: containerd.task.v2.Task.CloseIO:input_type -> containerd.task.v2.CloseIORequest + 14, // 24: containerd.task.v2.Task.Update:input_type -> containerd.task.v2.UpdateTaskRequest + 17, // 25: containerd.task.v2.Task.Wait:input_type -> containerd.task.v2.WaitRequest + 19, // 26: containerd.task.v2.Task.Stats:input_type -> containerd.task.v2.StatsRequest + 21, // 27: containerd.task.v2.Task.Connect:input_type -> containerd.task.v2.ConnectRequest + 23, // 28: containerd.task.v2.Task.Shutdown:input_type -> 
containerd.task.v2.ShutdownRequest + 8, // 29: containerd.task.v2.Task.State:output_type -> containerd.task.v2.StateResponse + 1, // 30: containerd.task.v2.Task.Create:output_type -> containerd.task.v2.CreateTaskResponse + 16, // 31: containerd.task.v2.Task.Start:output_type -> containerd.task.v2.StartResponse + 3, // 32: containerd.task.v2.Task.Delete:output_type -> containerd.task.v2.DeleteResponse + 12, // 33: containerd.task.v2.Task.Pids:output_type -> containerd.task.v2.PidsResponse + 32, // 34: containerd.task.v2.Task.Pause:output_type -> google.protobuf.Empty + 32, // 35: containerd.task.v2.Task.Resume:output_type -> google.protobuf.Empty + 32, // 36: containerd.task.v2.Task.Checkpoint:output_type -> google.protobuf.Empty + 32, // 37: containerd.task.v2.Task.Kill:output_type -> google.protobuf.Empty + 32, // 38: containerd.task.v2.Task.Exec:output_type -> google.protobuf.Empty + 32, // 39: containerd.task.v2.Task.ResizePty:output_type -> google.protobuf.Empty + 32, // 40: containerd.task.v2.Task.CloseIO:output_type -> google.protobuf.Empty + 32, // 41: containerd.task.v2.Task.Update:output_type -> google.protobuf.Empty + 18, // 42: containerd.task.v2.Task.Wait:output_type -> containerd.task.v2.WaitResponse + 20, // 43: containerd.task.v2.Task.Stats:output_type -> containerd.task.v2.StatsResponse + 22, // 44: containerd.task.v2.Task.Connect:output_type -> containerd.task.v2.ConnectResponse + 32, // 45: containerd.task.v2.Task.Shutdown:output_type -> google.protobuf.Empty + 29, // [29:46] is the sub-list for method output_type + 12, // [12:29] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_init() } +func file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_init() { + if File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResizePtyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseIORequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*StartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PauseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc, + NumEnums: 0, + NumMessages: 27, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto = out.File + 
file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc = nil + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes = nil + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto similarity index 91% rename from vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto rename to vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto index df77d57826..aad1bd66fd 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto @@ -20,12 +20,11 @@ package containerd.task.v2; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/task/task.proto"; -option go_package = "github.com/containerd/containerd/runtime/v2/task;task"; +option go_package = "github.com/containerd/containerd/api/runtime/task/v2;task"; // Shim service is launched for each container and is responsible for owning the IO // for the container and its additional processes. The shim is also the parent of @@ -76,7 +75,7 @@ message DeleteRequest { message DeleteResponse { uint32 pid = 1; uint32 exit_status = 2; - google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 3; } message ExecProcessRequest { @@ -114,7 +113,7 @@ message StateResponse { string stderr = 7; bool terminal = 8; uint32 exit_status = 9; - google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 10; string exec_id = 11; } @@ -167,7 +166,7 @@ message WaitRequest { message WaitResponse { uint32 exit_status = 1; - google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 2; } message StatsRequest { diff --git a/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go new file mode 100644 index 0000000000..1210371c2c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go @@ -0,0 +1,301 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/runtime/task/v2/shim.proto +package task + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +type TaskService interface { + State(context.Context, *StateRequest) (*StateResponse, error) + Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + Start(context.Context, *StartRequest) (*StartResponse, error) + Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + Pids(context.Context, *PidsRequest) (*PidsResponse, error) + Pause(context.Context, *PauseRequest) (*emptypb.Empty, error) + Resume(context.Context, *ResumeRequest) (*emptypb.Empty, error) + Checkpoint(context.Context, *CheckpointTaskRequest) (*emptypb.Empty, error) + Kill(context.Context, *KillRequest) (*emptypb.Empty, error) + Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) + ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) + CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) + Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) + Wait(context.Context, *WaitRequest) (*WaitResponse, error) + Stats(context.Context, *StatsRequest) (*StatsResponse, error) + Connect(context.Context, *ConnectRequest) (*ConnectResponse, error) + Shutdown(context.Context, *ShutdownRequest) (*emptypb.Empty, error) +} + +func RegisterTaskService(srv *ttrpc.Server, svc TaskService) { + srv.RegisterService("containerd.task.v2.Task", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.State(ctx, &req) + }, + "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Create(ctx, &req) + }, + "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Start(ctx, &req) + }, + "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req DeleteRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Delete(ctx, &req) + }, + "Pids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PidsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pids(ctx, &req) + }, + "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PauseRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pause(ctx, &req) + }, + "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResumeRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Resume(ctx, &req) + }, + "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CheckpointTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Checkpoint(ctx, &req) + }, + "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req KillRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Kill(ctx, &req) + }, + 
"Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ExecProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Exec(ctx, &req) + }, + "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResizePtyRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ResizePty(ctx, &req) + }, + "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CloseIORequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CloseIO(ctx, &req) + }, + "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Update(ctx, &req) + }, + "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Wait(ctx, &req) + }, + "Stats": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StatsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Stats(ctx, &req) + }, + "Connect": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ConnectRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Connect(ctx, &req) + }, + "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ShutdownRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Shutdown(ctx, &req) + }, + }, + }) +} + +type taskClient struct { + client *ttrpc.Client +} + +func NewTaskClient(client *ttrpc.Client) TaskService { + return &taskClient{ + client: client, + } +} + +func (c *taskClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { + var resp StateResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "State", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { + var resp CreateTaskResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Create", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + var resp StartResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Start", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Delete", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) { + var resp PidsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pids", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Resume(ctx 
context.Context, req *ResumeRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { + var resp WaitResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Wait", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { + var resp StatsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Stats", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) { + var resp ConnectResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Connect", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go index 8c84d9ca46..aab9e45b12 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go @@ -1,39 +1,49 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/containers/v1/containers.proto package containers import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Container struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID is the user-specified identifier. // // This field may not be updated. @@ -53,7 +63,7 @@ type Container struct { // Runtime specifies which runtime to use for executing this container. Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"` // Spec to be used when creating the container. This is runtime specific. - Spec *types.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` + Spec *anypb.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` // Snapshotter specifies the snapshotter name used for rootfs Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` // SnapshotKey specifies the snapshot key to use for the container's root @@ -68,9 +78,9 @@ type Container struct { // This field may be updated. SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` // CreatedAt is the time the container was first created. 
- CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt is the last time the container was mutated. - UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. One may provide protobuf, json, or other // encoding formats. The primary use of this is to further decorate the @@ -80,165 +90,219 @@ type Container struct { // that should be unique against other extensions. When updating extension // data, one should only update the specified extension using field paths // to select a specific map key. - Extensions map[string]types.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Extensions map[string]*anypb.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Sandbox ID this container belongs to. + Sandbox string `protobuf:"bytes,11,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *Container) Reset() { + *x = Container{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} + +func (x *Container) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Container.ProtoReflect.Descriptor instead. func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{0} + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{0} } -func (m *Container) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Container.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *Container) GetID() string { + if x != nil { + return x.ID } -} -func (m *Container) XXX_Merge(src proto.Message) { - xxx_messageInfo_Container.Merge(m, src) -} -func (m *Container) XXX_Size() int { - return m.Size() -} -func (m *Container) XXX_DiscardUnknown() { - xxx_messageInfo_Container.DiscardUnknown(m) + return "" } -var xxx_messageInfo_Container proto.InternalMessageInfo - -type Container_Runtime struct { - // Name is the name of the runtime. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Options specify additional runtime initialization options. - Options *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Container_Runtime) Reset() { *m = Container_Runtime{} } -func (*Container_Runtime) ProtoMessage() {} -func (*Container_Runtime) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{0, 1} -} -func (m *Container_Runtime) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Container_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Container_Runtime.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Container) GetLabels() map[string]string { + if x != nil { + return x.Labels } -} -func (m *Container_Runtime) XXX_Merge(src proto.Message) { - xxx_messageInfo_Container_Runtime.Merge(m, src) -} -func (m *Container_Runtime) XXX_Size() int { - return m.Size() -} -func (m *Container_Runtime) XXX_DiscardUnknown() { - xxx_messageInfo_Container_Runtime.DiscardUnknown(m) + return nil } -var xxx_messageInfo_Container_Runtime proto.InternalMessageInfo +func (x *Container) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +func (x *Container) GetRuntime() *Container_Runtime { + if x != nil { + return x.Runtime + } + return nil +} + +func (x *Container) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Container) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *Container) GetSnapshotKey() string { + if x != nil { + return x.SnapshotKey + } + return "" +} + +func (x *Container) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Container) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Container) GetExtensions() map[string]*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Container) GetSandbox() string { + if x != nil { + return x.Sandbox + } + return "" +} type GetContainerRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *GetContainerRequest) Reset() { *m = GetContainerRequest{} } -func (*GetContainerRequest) ProtoMessage() {} -func (*GetContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{1} -} -func (m *GetContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetContainerRequest) Reset() { + *x = GetContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := 
&file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetContainerRequest.Merge(m, src) -} -func (m *GetContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *GetContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetContainerRequest.DiscardUnknown(m) + +func (x *GetContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetContainerRequest proto.InternalMessageInfo +func (*GetContainerRequest) ProtoMessage() {} + +func (x *GetContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetContainerRequest.ProtoReflect.Descriptor instead. +func (*GetContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{1} +} + +func (x *GetContainerRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} type GetContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func (m *GetContainerResponse) Reset() { *m = GetContainerResponse{} } -func (*GetContainerResponse) ProtoMessage() {} -func (*GetContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{2} -} -func (m *GetContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetContainerResponse) Reset() { + *x = GetContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetContainerResponse.Merge(m, src) -} -func (m *GetContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *GetContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetContainerResponse.DiscardUnknown(m) + +func (x *GetContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetContainerResponse proto.InternalMessageInfo +func (*GetContainerResponse) ProtoMessage() {} + +func (x *GetContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetContainerResponse.ProtoReflect.Descriptor instead. +func (*GetContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{2} +} + +func (x *GetContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} type ListContainersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -249,160 +313,188 @@ type ListContainersRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } -func (*ListContainersRequest) ProtoMessage() {} -func (*ListContainersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{3} -} -func (m *ListContainersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListContainersRequest) Reset() { + *x = ListContainersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListContainersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainersRequest.Merge(m, src) -} -func (m *ListContainersRequest) XXX_Size() int { - return m.Size() -} -func (m *ListContainersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainersRequest.DiscardUnknown(m) + +func (x *ListContainersRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListContainersRequest proto.InternalMessageInfo +func (*ListContainersRequest) ProtoMessage() {} + +func (x *ListContainersRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListContainersRequest.ProtoReflect.Descriptor instead. 
+func (*ListContainersRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{3} +} + +func (x *ListContainersRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListContainersResponse struct { - Containers []Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Containers []*Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers,omitempty"` } -func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } -func (*ListContainersResponse) ProtoMessage() {} -func (*ListContainersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{4} -} -func (m *ListContainersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListContainersResponse) Reset() { + *x = ListContainersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListContainersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainersResponse.Merge(m, src) -} -func (m *ListContainersResponse) XXX_Size() int { - return m.Size() -} -func (m *ListContainersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainersResponse.DiscardUnknown(m) + +func (x *ListContainersResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListContainersResponse proto.InternalMessageInfo +func (*ListContainersResponse) ProtoMessage() {} + +func (x *ListContainersResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListContainersResponse.ProtoReflect.Descriptor instead. 
+func (*ListContainersResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{4} +} + +func (x *ListContainersResponse) GetContainers() []*Container { + if x != nil { + return x.Containers + } + return nil +} type CreateContainerRequest struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } -func (*CreateContainerRequest) ProtoMessage() {} -func (*CreateContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{5} -} -func (m *CreateContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateContainerRequest) Reset() { + *x = CreateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateContainerRequest.Merge(m, src) -} -func (m *CreateContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateContainerRequest.DiscardUnknown(m) + +func (x *CreateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateContainerRequest proto.InternalMessageInfo +func (*CreateContainerRequest) ProtoMessage() {} + +func (x *CreateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateContainerRequest.ProtoReflect.Descriptor instead. 
+func (*CreateContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} type CreateContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } -func (*CreateContainerResponse) ProtoMessage() {} -func (*CreateContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{6} -} -func (m *CreateContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateContainerResponse) Reset() { + *x = CreateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateContainerResponse.Merge(m, src) -} -func (m *CreateContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateContainerResponse.DiscardUnknown(m) + +func (x *CreateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo +func (*CreateContainerResponse) ProtoMessage() {} + +func (x *CreateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateContainerResponse.ProtoReflect.Descriptor instead. +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} // UpdateContainerRequest updates the metadata on one or more container. // @@ -410,3175 +502,677 @@ var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo // https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, // unless otherwise qualified. 
type UpdateContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Container provides the target values, as declared by the mask, for the update. // // The ID field must be set. - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } -func (*UpdateContainerRequest) ProtoMessage() {} -func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{7} -} -func (m *UpdateContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateContainerRequest) Reset() { + *x = UpdateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateContainerRequest.Merge(m, src) -} -func (m *UpdateContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateContainerRequest.DiscardUnknown(m) + +func (x *UpdateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateContainerRequest proto.InternalMessageInfo +func (*UpdateContainerRequest) ProtoMessage() {} + +func (x *UpdateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainerRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +func (x *UpdateContainerRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } -func (*UpdateContainerResponse) ProtoMessage() {} -func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{8} -} -func (m *UpdateContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateContainerResponse) Reset() { + *x = UpdateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateContainerResponse.Merge(m, src) -} -func (m *UpdateContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateContainerResponse.DiscardUnknown(m) + +func (x *UpdateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateContainerResponse proto.InternalMessageInfo +func (*UpdateContainerResponse) ProtoMessage() {} + +func (x *UpdateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateContainerResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} type DeleteContainerRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *DeleteContainerRequest) Reset() { *m = DeleteContainerRequest{} } -func (*DeleteContainerRequest) ProtoMessage() {} -func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{9} -} -func (m *DeleteContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteContainerRequest) Reset() { + *x = DeleteContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteContainerRequest.Merge(m, src) -} -func (m *DeleteContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteContainerRequest.DiscardUnknown(m) + +func (x *DeleteContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteContainerRequest proto.InternalMessageInfo +func (*DeleteContainerRequest) ProtoMessage() {} + +func (x *DeleteContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteContainerRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteContainerRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} type ListContainerMessage struct { - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` +} + +func (x *ListContainerMessage) Reset() { + *x = ListContainerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListContainerMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListContainerMessage) Reset() { *m = ListContainerMessage{} } func (*ListContainerMessage) ProtoMessage() {} + +func (x *ListContainerMessage) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListContainerMessage.ProtoReflect.Descriptor instead. func (*ListContainerMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{10} + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{10} } -func (m *ListContainerMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainerMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListContainerMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainerMessage.Merge(m, src) -} -func (m *ListContainerMessage) XXX_Size() int { - return m.Size() -} -func (m *ListContainerMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainerMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ListContainerMessage proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container") - proto.RegisterMapType((map[string]types.Any)(nil), "containerd.services.containers.v1.Container.ExtensionsEntry") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.containers.v1.Container.LabelsEntry") - proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime") - proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest") - proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse") - proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest") - proto.RegisterType((*ListContainersResponse)(nil), 
"containerd.services.containers.v1.ListContainersResponse") - proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest") - proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse") - proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest") - proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse") - proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest") - proto.RegisterType((*ListContainerMessage)(nil), "containerd.services.containers.v1.ListContainerMessage") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptor_311afb8e15951042) -} - -var fileDescriptor_311afb8e15951042 = []byte{ - // 820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x6e, 0x13, 0x49, - 0x14, 0x75, 0xdb, 0x4e, 0x3b, 0xbe, 0x1e, 0x69, 0x46, 0x35, 0x1e, 0x4f, 0x4f, 0x8f, 0x64, 0x3b, - 0x5e, 0x59, 0xa3, 0xa1, 0x9d, 0x18, 0x44, 0x5e, 0x6c, 0xe2, 0xbc, 0x04, 0x24, 0x28, 0xea, 0x80, - 0x84, 0x60, 0x11, 0xda, 0x76, 0xc5, 0x69, 0xdc, 0x2f, 0xba, 0xca, 0x16, 0x16, 0x8b, 0xc0, 0x1f, - 0xb0, 0xe3, 0x13, 0xf8, 0x95, 0x2c, 0x59, 0xb2, 0x0a, 0xc4, 0xe2, 0x43, 0x50, 0x57, 0x57, 0xbb, - 0x3b, 0x7e, 0x80, 0x9d, 0x90, 0x5d, 0x5d, 0xd7, 0x3d, 0xf7, 0x9e, 0x3a, 0xb7, 0x4e, 0xb9, 0x61, - 0xaf, 0xa5, 0xd3, 0x93, 0x4e, 0x5d, 0x69, 0xd8, 0x66, 0xa5, 0x61, 0x5b, 0x54, 0xd3, 0x2d, 0xec, - 0x36, 0xa3, 0x4b, 0xcd, 0xd1, 0x2b, 0x04, 0xbb, 0x5d, 0xbd, 0x81, 0x49, 0xf8, 0x3b, 0xa9, 0x74, - 0x97, 0x22, 0x91, 0xe2, 0xb8, 0x36, 0xb5, 0xd1, 0x42, 0x88, 0x53, 0x02, 0x8c, 0x12, 0xc9, 0xea, - 0x2e, 0xc9, 0xd9, 0x96, 0xdd, 0xb2, 0x59, 0x76, 0xc5, 0x5b, 0xf9, 0x40, 0xf9, 0x9f, 0x96, 0x6d, - 0xb7, 0x0c, 0x5c, 0x61, 0x51, 0xbd, 0x73, 0x5c, 0xd1, 0xac, 0x1e, 0xdf, 0xfa, 0x77, 0x78, 0x0b, - 0x9b, 0x0e, 0x0d, 0x36, 0x8b, 0xc3, 0x9b, 0xc7, 0x3a, 0x36, 0x9a, 0x47, 0xa6, 0x46, 0xda, 0x3c, - 0xa3, 0x30, 0x9c, 0x41, 0x75, 0x13, 0x13, 0xaa, 0x99, 0x8e, 0x9f, 0x50, 0xfa, 0x20, 0x42, 0x7a, - 0x33, 0xa0, 0x88, 0x72, 0x10, 0xd7, 0x9b, 0x92, 0x50, 0x14, 0xca, 0xe9, 0x9a, 0xd8, 0x3f, 0x2f, - 0xc4, 0xef, 0x6f, 0xa9, 0x71, 0xbd, 0x89, 0x0e, 0x40, 0x34, 0xb4, 0x3a, 0x36, 0x88, 0x14, 0x2f, - 0x26, 0xca, 0x99, 0xea, 0x8a, 0xf2, 0xd3, 0xa3, 0x2a, 0x83, 0xaa, 0xca, 0x1e, 0x83, 0x6e, 0x5b, - 0xd4, 0xed, 0xa9, 0xbc, 0x0e, 0xca, 0xc2, 0x9c, 0x6e, 0x6a, 0x2d, 0x2c, 0x25, 0xbc, 0x66, 0xaa, - 0x1f, 0xa0, 0x47, 0x90, 0x72, 0x3b, 0x96, 0xc7, 0x51, 0x4a, 0x16, 0x85, 0x72, 0xa6, 0x7a, 0x67, - 0xa6, 0x46, 0xaa, 0x8f, 0x55, 0x83, 0x22, 0xa8, 0x0c, 0x49, 0xe2, 0xe0, 0x86, 0x34, 0xc7, 0x8a, - 0x65, 0x15, 0x5f, 0x0d, 0x25, 0x50, 0x43, 0xd9, 0xb0, 0x7a, 0x2a, 0xcb, 0x40, 0x45, 0xc8, 0x10, - 0x4b, 0x73, 0xc8, 0x89, 0x4d, 0x29, 0x76, 0x25, 0x91, 0xb1, 0x8a, 0xfe, 0x84, 0x16, 0xe0, 0xb7, - 0x20, 0x3c, 0x6a, 0xe3, 0x9e, 0x94, 0xba, 0x9c, 0xf2, 0x10, 0xf7, 0xd0, 0x26, 0x40, 0xc3, 0xc5, - 0x1a, 0xc5, 0xcd, 0x23, 0x8d, 0x4a, 0xf3, 0xac, 0xa9, 0x3c, 0xd2, 0xf4, 0x71, 0x30, 0x82, 0xda, - 0xfc, 0xd9, 0x79, 0x21, 0xf6, 0xfe, 0x4b, 0x41, 0x50, 0xd3, 0x1c, 0xb7, 0x41, 0xbd, 0x22, 0x1d, - 0xa7, 0x19, 0x14, 0x49, 0xcf, 0x52, 0x84, 0xe3, 0x36, 0x28, 0xaa, 0x03, 0xe0, 0xd7, 0x14, 0x5b, - 0x44, 0xb7, 0x2d, 0x22, 0x01, 0x1b, 0xda, 0xbd, 0x99, 0xb4, 0xdc, 0x1e, 0xc0, 0xd9, 0xe0, 0x6a, - 0x49, 0xaf, 
0x8d, 0x1a, 0xa9, 0x2a, 0xaf, 0x42, 0x26, 0x32, 0x59, 0xf4, 0x07, 0x24, 0x3c, 0x59, - 0xd8, 0xe5, 0x51, 0xbd, 0xa5, 0x37, 0xe3, 0xae, 0x66, 0x74, 0xb0, 0x14, 0xf7, 0x67, 0xcc, 0x82, - 0xb5, 0xf8, 0x8a, 0x20, 0xef, 0x43, 0x8a, 0xcf, 0x0a, 0x21, 0x48, 0x5a, 0x9a, 0x89, 0x39, 0x8e, - 0xad, 0x91, 0x02, 0x29, 0xdb, 0xa1, 0x8c, 0x7a, 0xfc, 0x07, 0x93, 0x0b, 0x92, 0xe4, 0x43, 0xf8, - 0x7d, 0x88, 0xee, 0x18, 0x36, 0xff, 0x45, 0xd9, 0x4c, 0x2a, 0x19, 0x72, 0x2c, 0xdd, 0x82, 0x3f, - 0x77, 0x31, 0x1d, 0x08, 0xa2, 0xe2, 0x57, 0x1d, 0x4c, 0xe8, 0x24, 0x8b, 0x94, 0x4e, 0x20, 0x7b, - 0x39, 0x9d, 0x38, 0xb6, 0x45, 0x30, 0x3a, 0x80, 0xf4, 0x40, 0x62, 0x06, 0xcb, 0x54, 0xff, 0x9f, - 0x65, 0x10, 0x5c, 0xf8, 0xb0, 0x48, 0x69, 0x09, 0xfe, 0xda, 0xd3, 0x49, 0xd8, 0x8a, 0x04, 0xd4, - 0x24, 0x48, 0x1d, 0xeb, 0x06, 0xc5, 0x2e, 0x91, 0x84, 0x62, 0xa2, 0x9c, 0x56, 0x83, 0xb0, 0x64, - 0x40, 0x6e, 0x18, 0xc2, 0xe9, 0xa9, 0x00, 0x61, 0x63, 0x06, 0xbb, 0x1a, 0xbf, 0x48, 0x95, 0xd2, - 0x4b, 0xc8, 0x6d, 0xb2, 0xeb, 0x3c, 0x22, 0xde, 0xaf, 0x17, 0xa3, 0x0d, 0x7f, 0x8f, 0xf4, 0xba, - 0x31, 0xe5, 0x3f, 0x0a, 0x90, 0x7b, 0xc2, 0x3c, 0x76, 0xf3, 0x27, 0x43, 0xeb, 0x90, 0xf1, 0xfd, - 0xcc, 0xde, 0x73, 0x7e, 0x6b, 0x47, 0x1f, 0x82, 0x1d, 0xef, 0xc9, 0xdf, 0xd7, 0x48, 0x5b, 0xe5, - 0xcf, 0x86, 0xb7, 0xf6, 0x64, 0x19, 0x21, 0x7a, 0x63, 0xb2, 0x2c, 0x42, 0x6e, 0x0b, 0x1b, 0x78, - 0x8c, 0x2a, 0x93, 0xcc, 0x52, 0x87, 0xec, 0xa5, 0xfb, 0xb8, 0x8f, 0x09, 0xf1, 0xde, 0xff, 0x07, - 0xd7, 0xe4, 0x16, 0x61, 0x55, 0xfd, 0x36, 0x07, 0x10, 0x5e, 0x78, 0xd4, 0x85, 0xc4, 0x2e, 0xa6, - 0xe8, 0xee, 0x14, 0xe5, 0xc6, 0xd8, 0x5e, 0x5e, 0x9e, 0x19, 0xc7, 0xe5, 0x7e, 0x03, 0x49, 0xef, - 0xa8, 0x68, 0x9a, 0xbf, 0xcc, 0xb1, 0xb6, 0x96, 0x57, 0xaf, 0x80, 0xe4, 0xcd, 0xdf, 0x09, 0x00, - 0xde, 0xd6, 0x21, 0x75, 0xb1, 0x66, 0x5e, 0x83, 0xc3, 0xf2, 0xac, 0x48, 0x3e, 0xd1, 0x45, 0x01, - 0x9d, 0x82, 0xe8, 0x3b, 0x14, 0x4d, 0x73, 0x90, 0xf1, 0x0f, 0x87, 0xbc, 0x76, 0x15, 0x28, 0x17, - 0xe1, 0x14, 0x44, 0xdf, 0x0b, 0x53, 0x11, 0x18, 0xef, 0xef, 0xa9, 0x08, 0x4c, 0x72, 0xdc, 0x73, - 0x10, 0x7d, 0x7f, 0x4c, 0x45, 0x60, 0xbc, 0x95, 0xe4, 0xdc, 0x88, 0xf3, 0xb7, 0xbd, 0x2f, 0xc1, - 0xda, 0x8b, 0xb3, 0x8b, 0x7c, 0xec, 0xf3, 0x45, 0x3e, 0xf6, 0xb6, 0x9f, 0x17, 0xce, 0xfa, 0x79, - 0xe1, 0x53, 0x3f, 0x2f, 0x7c, 0xed, 0xe7, 0x85, 0x67, 0x3b, 0xd7, 0xf8, 0xb8, 0x5d, 0x0f, 0xa3, - 0xa7, 0xb1, 0xba, 0xc8, 0x7a, 0xde, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xae, 0xca, 0xcb, - 0x2f, 0x0b, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ContainersClient is the client API for Containers service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ContainersClient interface { - Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) - List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) - ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) - Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) - Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) - Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type containersClient struct { - cc *grpc.ClientConn -} - -func NewContainersClient(cc *grpc.ClientConn) ContainersClient { - return &containersClient{cc} -} - -func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) { - out := new(GetContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { - out := new(ListContainersResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *containersClient) ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Containers_serviceDesc.Streams[0], "/containerd.services.containers.v1.Containers/ListStream", opts...) - if err != nil { - return nil, err - } - x := &containersListStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Containers_ListStreamClient interface { - Recv() (*ListContainerMessage, error) - grpc.ClientStream -} - -type containersListStreamClient struct { - grpc.ClientStream -} - -func (x *containersListStreamClient) Recv() (*ListContainerMessage, error) { - m := new(ListContainerMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { - out := new(CreateContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { - out := new(UpdateContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// ContainersServer is the server API for Containers service. -type ContainersServer interface { - Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) - List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) - ListStream(*ListContainersRequest, Containers_ListStreamServer) error - Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) - Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) - Delete(context.Context, *DeleteContainerRequest) (*types.Empty, error) -} - -// UnimplementedContainersServer can be embedded to have forward compatible implementations. -type UnimplementedContainersServer struct { -} - -func (*UnimplementedContainersServer) Get(ctx context.Context, req *GetContainerRequest) (*GetContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedContainersServer) List(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedContainersServer) ListStream(req *ListContainersRequest, srv Containers_ListStreamServer) error { - return status.Errorf(codes.Unimplemented, "method ListStream not implemented") -} -func (*UnimplementedContainersServer) Create(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedContainersServer) Update(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedContainersServer) Delete(ctx context.Context, req *DeleteContainerRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} - -func RegisterContainersServer(s *grpc.Server, srv ContainersServer) { - s.RegisterService(&_Containers_serviceDesc, srv) -} - -func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListContainersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Containers_ListStream_Handler(srv interface{}, stream grpc.ServerStream) error 
{ - m := new(ListContainersRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContainersServer).ListStream(m, &containersListStreamServer{stream}) -} - -type Containers_ListStreamServer interface { - Send(*ListContainerMessage) error - grpc.ServerStream -} - -type containersListStreamServer struct { - grpc.ServerStream -} - -func (x *containersListStreamServer) Send(m *ListContainerMessage) error { - return x.ServerStream.SendMsg(m) -} - -func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Containers_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.containers.v1.Containers", - HandlerType: (*ContainersServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Containers_Get_Handler, - }, - { - MethodName: "List", - Handler: _Containers_List_Handler, - }, - { - MethodName: "Create", - Handler: _Containers_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Containers_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Containers_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListStream", - Handler: _Containers_ListStream_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto", -} - -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Extensions) > 0 { - for k := range m.Extensions { - v := m.Extensions[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainers(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainers(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintContainers(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x4a - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintContainers(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x42 - if len(m.SnapshotKey) > 0 { - i -= len(m.SnapshotKey) - copy(dAtA[i:], m.SnapshotKey) - i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey))) - i-- - dAtA[i] = 0x3a - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0x32 - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Runtime != nil { - { - size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x1a - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContainers(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainers(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainers(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Container_Runtime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Container_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size 
- i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListContainerMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainerMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Container != nil { - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContainers(dAtA []byte, offset int, v uint64) int { - offset -= sovContainers(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Container) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v))) - n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize)) - } - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if m.Runtime != nil { - l = m.Runtime.Size() - n += 1 + l + sovContainers(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovContainers(uint64(l)) - } - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - l = len(m.SnapshotKey) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovContainers(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContainers(uint64(l)) - if len(m.Extensions) > 0 { - for k, v := range m.Extensions { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + l + sovContainers(uint64(l)) - n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Container_Runtime) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if 
m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovContainers(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetContainerResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContainersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContainers(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContainersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovContainers(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateContainerResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovContainers(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateContainerResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContainerMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Container != nil { - l = m.Container.Size() - n += 1 + l + sovContainers(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContainers(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContainers(x uint64) (n int) { - return sovContainers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Container) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - 
mapStringForLabels += "}" - keysForExtensions := make([]string, 0, len(this.Extensions)) - for k, _ := range this.Extensions { - keysForExtensions = append(keysForExtensions, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtensions) - mapStringForExtensions := "map[string]types.Any{" - for _, k := range keysForExtensions { - mapStringForExtensions += fmt.Sprintf("%v: %v,", k, this.Extensions[k]) - } - mapStringForExtensions += "}" - s := strings.Join([]string{`&Container{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Labels:` + mapStringForLabels + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types.Any", 1) + `,`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Extensions:` + mapStringForExtensions + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Container_Runtime) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Container_Runtime{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetContainerRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetContainerRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetContainerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetContainerResponse{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContainersRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListContainersRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContainersResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForContainers := "[]Container{" - for _, f := range this.Containers { - repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," - } - repeatedStringForContainers += "}" - s := strings.Join([]string{`&ListContainersResponse{`, - `Containers:` + repeatedStringForContainers + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateContainerRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateContainerRequest{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + 
`,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateContainerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateContainerResponse{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateContainerRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateContainerRequest{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateContainerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateContainerResponse{`, - `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteContainerRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteContainerRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContainerMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListContainerMessage{`, - `Container:` + strings.Replace(this.Container.String(), "Container", "Container", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContainers(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Container) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContainers - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContainers - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Runtime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Runtime == nil { - m.Runtime = &Container_Runtime{} - } - if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &types.Any{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Extensions == nil { - m.Extensions = make(map[string]types.Any) - } - var mapkey string - mapvalue := &types.Any{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainers - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContainers - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthContainers - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthContainers - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &types.Any{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Extensions[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ListContainerMessage) GetContainer() *Container { + if x != nil { + return x.Container } return nil } -func (m *Container_Runtime) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Runtime: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type Container_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name is the name of the runtime. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Options specify additional runtime initialization options. 
+ Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *Container_Runtime) Reset() { + *x = Container_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Container_Runtime) ProtoMessage() {} + +func (x *Container_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Container_Runtime.ProtoReflect.Descriptor instead. +func (*Container_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Container_Runtime) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Container_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options } return nil } -func (m *GetContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_services_containers_v1_containers_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContainersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContainersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContainersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContainersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContainersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContainersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, Container{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContainerMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContainerMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContainerMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainers - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainers - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainers - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Container == nil { - m.Container = &Container{} - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainers(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainers - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipContainers(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainers - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainers - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainers - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContainers - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContainers - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContainers - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc = []byte{ + 0x0a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, + 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x06, 0x0a, + 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x12, 0x4e, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x20, 0x0a, 0x0b, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5c, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x1a, + 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x62, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, + 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x31, 0x0a, 0x15, 0x4c, 0x69, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x73, 0x22, 0x64, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, + 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x65, 0x0a, 0x17, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x65, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x16, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x62, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x4a, + 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x32, 0xe4, 0x05, 0x0a, 0x0a, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x76, 0x0a, 0x03, 0x47, 0x65, 0x74, + 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, + 0x65, 0x12, 0x7b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, + 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x38, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x30, 0x01, 0x12, 0x7f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x39, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x2f, 0x76, 0x31, + 0x3b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContainers = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContainers = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescData = file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_goTypes = []interface{}{ + (*Container)(nil), // 0: containerd.services.containers.v1.Container + (*GetContainerRequest)(nil), // 1: containerd.services.containers.v1.GetContainerRequest + (*GetContainerResponse)(nil), // 2: containerd.services.containers.v1.GetContainerResponse + (*ListContainersRequest)(nil), // 3: containerd.services.containers.v1.ListContainersRequest + (*ListContainersResponse)(nil), // 4: containerd.services.containers.v1.ListContainersResponse + (*CreateContainerRequest)(nil), // 5: containerd.services.containers.v1.CreateContainerRequest + (*CreateContainerResponse)(nil), // 6: containerd.services.containers.v1.CreateContainerResponse + (*UpdateContainerRequest)(nil), // 7: containerd.services.containers.v1.UpdateContainerRequest + (*UpdateContainerResponse)(nil), // 8: containerd.services.containers.v1.UpdateContainerResponse + (*DeleteContainerRequest)(nil), // 9: containerd.services.containers.v1.DeleteContainerRequest + (*ListContainerMessage)(nil), // 10: containerd.services.containers.v1.ListContainerMessage + nil, // 11: containerd.services.containers.v1.Container.LabelsEntry + (*Container_Runtime)(nil), // 12: containerd.services.containers.v1.Container.Runtime + nil, // 13: containerd.services.containers.v1.Container.ExtensionsEntry + (*anypb.Any)(nil), // 14: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 16: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty +} +var 
file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_depIdxs = []int32{ + 11, // 0: containerd.services.containers.v1.Container.labels:type_name -> containerd.services.containers.v1.Container.LabelsEntry + 12, // 1: containerd.services.containers.v1.Container.runtime:type_name -> containerd.services.containers.v1.Container.Runtime + 14, // 2: containerd.services.containers.v1.Container.spec:type_name -> google.protobuf.Any + 15, // 3: containerd.services.containers.v1.Container.created_at:type_name -> google.protobuf.Timestamp + 15, // 4: containerd.services.containers.v1.Container.updated_at:type_name -> google.protobuf.Timestamp + 13, // 5: containerd.services.containers.v1.Container.extensions:type_name -> containerd.services.containers.v1.Container.ExtensionsEntry + 0, // 6: containerd.services.containers.v1.GetContainerResponse.container:type_name -> containerd.services.containers.v1.Container + 0, // 7: containerd.services.containers.v1.ListContainersResponse.containers:type_name -> containerd.services.containers.v1.Container + 0, // 8: containerd.services.containers.v1.CreateContainerRequest.container:type_name -> containerd.services.containers.v1.Container + 0, // 9: containerd.services.containers.v1.CreateContainerResponse.container:type_name -> containerd.services.containers.v1.Container + 0, // 10: containerd.services.containers.v1.UpdateContainerRequest.container:type_name -> containerd.services.containers.v1.Container + 16, // 11: containerd.services.containers.v1.UpdateContainerRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 12: containerd.services.containers.v1.UpdateContainerResponse.container:type_name -> containerd.services.containers.v1.Container + 0, // 13: containerd.services.containers.v1.ListContainerMessage.container:type_name -> containerd.services.containers.v1.Container + 14, // 14: containerd.services.containers.v1.Container.Runtime.options:type_name -> google.protobuf.Any + 14, // 15: containerd.services.containers.v1.Container.ExtensionsEntry.value:type_name -> google.protobuf.Any + 1, // 16: containerd.services.containers.v1.Containers.Get:input_type -> containerd.services.containers.v1.GetContainerRequest + 3, // 17: containerd.services.containers.v1.Containers.List:input_type -> containerd.services.containers.v1.ListContainersRequest + 3, // 18: containerd.services.containers.v1.Containers.ListStream:input_type -> containerd.services.containers.v1.ListContainersRequest + 5, // 19: containerd.services.containers.v1.Containers.Create:input_type -> containerd.services.containers.v1.CreateContainerRequest + 7, // 20: containerd.services.containers.v1.Containers.Update:input_type -> containerd.services.containers.v1.UpdateContainerRequest + 9, // 21: containerd.services.containers.v1.Containers.Delete:input_type -> containerd.services.containers.v1.DeleteContainerRequest + 2, // 22: containerd.services.containers.v1.Containers.Get:output_type -> containerd.services.containers.v1.GetContainerResponse + 4, // 23: containerd.services.containers.v1.Containers.List:output_type -> containerd.services.containers.v1.ListContainersResponse + 10, // 24: containerd.services.containers.v1.Containers.ListStream:output_type -> containerd.services.containers.v1.ListContainerMessage + 6, // 25: containerd.services.containers.v1.Containers.Create:output_type -> containerd.services.containers.v1.CreateContainerResponse + 8, // 26: containerd.services.containers.v1.Containers.Update:output_type -> 
containerd.services.containers.v1.UpdateContainerResponse + 17, // 27: containerd.services.containers.v1.Containers.Delete:output_type -> google.protobuf.Empty + 22, // [22:28] is the sub-list for method output_type + 16, // [16:22] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_init() } +func file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_init() { + if File_github_com_containerd_containerd_api_services_containers_v1_containers_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetContainerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContainersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContainersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateContainerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainerResponse); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContainerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_containers_v1_containers_proto = out.File + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto index eb4068e612..3de07ffbd6 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.containers.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; @@ -99,10 +98,10 @@ message Container { string snapshot_key = 7; // CreatedAt is the time the container was first created. - google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 8; // UpdatedAt is the last time the container was mutated. - google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 9; // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. One may provide protobuf, json, or other @@ -113,7 +112,10 @@ message Container { // that should be unique against other extensions. 
When updating extension
   // data, one should only update the specified extension using field paths
   // to select a specific map key.
-  map<string, google.protobuf.Any> extensions = 10 [(gogoproto.nullable) = false];
+  map<string, google.protobuf.Any> extensions = 10;
+
+  // Sandbox ID this container belongs to.
+  string sandbox = 11;
 }
 
 message GetContainerRequest {
@@ -121,7 +123,7 @@ message GetContainerRequest {
 }
 
 message GetContainerResponse {
-  Container container = 1 [(gogoproto.nullable) = false];
+  Container container = 1;
 }
 
 message ListContainersRequest {
@@ -139,15 +141,15 @@ message ListContainersRequest {
 }
 
 message ListContainersResponse {
-  repeated Container containers = 1 [(gogoproto.nullable) = false];
+  repeated Container containers = 1;
 }
 
 message CreateContainerRequest {
-  Container container = 1 [(gogoproto.nullable) = false];
+  Container container = 1;
 }
 
 message CreateContainerResponse {
-  Container container = 1 [(gogoproto.nullable) = false];
+  Container container = 1;
 }
 
 // UpdateContainerRequest updates the metadata on one or more container.
@@ -159,7 +161,7 @@ message UpdateContainerRequest {
   // Container provides the target values, as declared by the mask, for the update.
   //
   // The ID field must be set.
-  Container container = 1 [(gogoproto.nullable) = false];
+  Container container = 1;
 
   // UpdateMask specifies which fields to perform the update on. If empty,
   // the operation applies to all fields.
@@ -167,7 +169,7 @@ message UpdateContainerRequest {
 }
 
 message UpdateContainerResponse {
-  Container container = 1 [(gogoproto.nullable) = false];
+  Container container = 1;
 }
 
 message DeleteContainerRequest {
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go
new file mode 100644
index 0000000000..701e5c1ebc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go
@@ -0,0 +1,314 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v3.20.1
+// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
+
+package containers
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// ContainersClient is the client API for Containers service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ContainersClient interface { + Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) + List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) + ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) + Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type containersClient struct { + cc grpc.ClientConnInterface +} + +func NewContainersClient(cc grpc.ClientConnInterface) ContainersClient { + return &containersClient{cc} +} + +func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) { + out := new(GetContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { + out := new(ListContainersResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Containers_ServiceDesc.Streams[0], "/containerd.services.containers.v1.Containers/ListStream", opts...) + if err != nil { + return nil, err + } + x := &containersListStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Containers_ListStreamClient interface { + Recv() (*ListContainerMessage, error) + grpc.ClientStream +} + +type containersListStreamClient struct { + grpc.ClientStream +} + +func (x *containersListStreamClient) Recv() (*ListContainerMessage, error) { + m := new(ListContainerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ContainersServer is the server API for Containers service. +// All implementations must embed UnimplementedContainersServer +// for forward compatibility +type ContainersServer interface { + Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) + List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) + ListStream(*ListContainersRequest, Containers_ListStreamServer) error + Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedContainersServer() +} + +// UnimplementedContainersServer must be embedded to have forward compatible implementations. +type UnimplementedContainersServer struct { +} + +func (UnimplementedContainersServer) Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedContainersServer) List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedContainersServer) ListStream(*ListContainersRequest, Containers_ListStreamServer) error { + return status.Errorf(codes.Unimplemented, "method ListStream not implemented") +} +func (UnimplementedContainersServer) Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedContainersServer) Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedContainersServer) Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedContainersServer) mustEmbedUnimplementedContainersServer() {} + +// UnsafeContainersServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ContainersServer will +// result in compilation errors. 
+type UnsafeContainersServer interface { + mustEmbedUnimplementedContainersServer() +} + +func RegisterContainersServer(s grpc.ServiceRegistrar, srv ContainersServer) { + s.RegisterService(&Containers_ServiceDesc, srv) +} + +func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListContainersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_ListStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContainersRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContainersServer).ListStream(m, &containersListStreamServer{stream}) +} + +type Containers_ListStreamServer interface { + Send(*ListContainerMessage) error + grpc.ServerStream +} + +type containersListStreamServer struct { + grpc.ServerStream +} + +func (x *containersListStreamServer) Send(m *ListContainerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(DeleteContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Containers_ServiceDesc is the grpc.ServiceDesc for Containers service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Containers_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.containers.v1.Containers", + HandlerType: (*ContainersServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Containers_Get_Handler, + }, + { + MethodName: "List", + Handler: _Containers_List_Handler, + }, + { + MethodName: "Create", + Handler: _Containers_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Containers_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Containers_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListStream", + Handler: _Containers_ListStream_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go index df272237cc..2836646628 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go @@ -1,38 +1,42 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/content/v1/content.proto package content import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // WriteAction defines the behavior of a WriteRequest. type WriteAction int32 @@ -40,14 +44,14 @@ type WriteAction int32 const ( // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. - WriteActionStat WriteAction = 0 + WriteAction_STAT WriteAction = 0 // WriteActionWrite sets the action for the write request to write data. // // Any data included will be written at the provided offset. The // transaction will be left open for further writes. // // This is the default. - WriteActionWrite WriteAction = 1 + WriteAction_WRITE WriteAction = 1 // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. // @@ -55,243 +59,343 @@ const ( // commit it. // // This action will always terminate the write. - WriteActionCommit WriteAction = 2 + WriteAction_COMMIT WriteAction = 2 ) -var WriteAction_name = map[int32]string{ - 0: "STAT", - 1: "WRITE", - 2: "COMMIT", -} +// Enum value maps for WriteAction. 
+var ( + WriteAction_name = map[int32]string{ + 0: "STAT", + 1: "WRITE", + 2: "COMMIT", + } + WriteAction_value = map[string]int32{ + "STAT": 0, + "WRITE": 1, + "COMMIT": 2, + } +) -var WriteAction_value = map[string]int32{ - "STAT": 0, - "WRITE": 1, - "COMMIT": 2, +func (x WriteAction) Enum() *WriteAction { + p := new(WriteAction) + *p = x + return p } func (x WriteAction) String() string { - return proto.EnumName(WriteAction_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (WriteAction) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0].Descriptor() +} + +func (WriteAction) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0] +} + +func (x WriteAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WriteAction.Descriptor instead. func (WriteAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0} } type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest is the hash identity of the blob. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` // Size is the total number of bytes in the blob. - Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` // CreatedAt provides the time at which the blob was committed. - CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt provides the time the info was last updated. - UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. 
- Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Info) Reset() { *m = Info{} } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return m.Size() -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Info proto.InternalMessageInfo +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
+func (*Info) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0} +} + +func (x *Info) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *Info) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Info) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Info) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Info) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type InfoRequest struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{1} -} -func (m *InfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *InfoRequest) Reset() { + *x = InfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *InfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoRequest.Merge(m, src) -} -func (m *InfoRequest) XXX_Size() int { - return m.Size() -} -func (m *InfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InfoRequest.DiscardUnknown(m) + +func (x *InfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_InfoRequest proto.InternalMessageInfo +func (*InfoRequest) ProtoMessage() {} + +func (x *InfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead. 
+func (*InfoRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{1} +} + +func (x *InfoRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} type InfoResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *InfoResponse) Reset() { *m = InfoResponse{} } -func (*InfoResponse) ProtoMessage() {} -func (*InfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{2} -} -func (m *InfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *InfoResponse) Reset() { + *x = InfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *InfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoResponse.Merge(m, src) -} -func (m *InfoResponse) XXX_Size() int { - return m.Size() -} -func (m *InfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InfoResponse.DiscardUnknown(m) + +func (x *InfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_InfoResponse proto.InternalMessageInfo +func (*InfoResponse) ProtoMessage() {} + +func (x *InfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InfoResponse.ProtoReflect.Descriptor instead. +func (*InfoResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{2} +} + +func (x *InfoResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type UpdateRequest struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Digest, Size, and CreatedAt are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. 
- UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } -func (*UpdateRequest) ProtoMessage() {} -func (*UpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{3} -} -func (m *UpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateRequest) Reset() { + *x = UpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateRequest.Merge(m, src) -} -func (m *UpdateRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateRequest.DiscardUnknown(m) + +func (x *UpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo +func (*UpdateRequest) ProtoMessage() {} + +func (x *UpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateRequest) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *UpdateRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } -func (*UpdateResponse) ProtoMessage() {} -func (*UpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{4} -} -func (m *UpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateResponse) Reset() { + *x = UpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateResponse.Merge(m, src) -} -func (m *UpdateResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateResponse.DiscardUnknown(m) + +func (x *UpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo +func (*UpdateResponse) ProtoMessage() {} + +func (x *UpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead. +func (*UpdateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type ListContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -302,415 +406,551 @@ type ListContentRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
- Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListContentRequest) Reset() { *m = ListContentRequest{} } -func (*ListContentRequest) ProtoMessage() {} -func (*ListContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{5} -} -func (m *ListContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListContentRequest) Reset() { + *x = ListContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentRequest.Merge(m, src) -} -func (m *ListContentRequest) XXX_Size() int { - return m.Size() -} -func (m *ListContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentRequest.DiscardUnknown(m) + +func (x *ListContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListContentRequest proto.InternalMessageInfo +func (*ListContentRequest) ProtoMessage() {} + +func (x *ListContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListContentRequest.ProtoReflect.Descriptor instead. 
+func (*ListContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{5} +} + +func (x *ListContentRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListContentResponse struct { - Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } -func (m *ListContentResponse) Reset() { *m = ListContentResponse{} } -func (*ListContentResponse) ProtoMessage() {} -func (*ListContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{6} -} -func (m *ListContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListContentResponse) Reset() { + *x = ListContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentResponse.Merge(m, src) -} -func (m *ListContentResponse) XXX_Size() int { - return m.Size() -} -func (m *ListContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentResponse.DiscardUnknown(m) + +func (x *ListContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListContentResponse proto.InternalMessageInfo +func (*ListContentResponse) ProtoMessage() {} + +func (x *ListContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListContentResponse.ProtoReflect.Descriptor instead. +func (*ListContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{6} +} + +func (x *ListContentResponse) GetInfo() []*Info { + if x != nil { + return x.Info + } + return nil +} type DeleteContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest specifies which content to delete. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *DeleteContentRequest) Reset() { *m = DeleteContentRequest{} } -func (*DeleteContentRequest) ProtoMessage() {} -func (*DeleteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{7} -} -func (m *DeleteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteContentRequest) Reset() { + *x = DeleteContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteContentRequest.Merge(m, src) -} -func (m *DeleteContentRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteContentRequest.DiscardUnknown(m) + +func (x *DeleteContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteContentRequest proto.InternalMessageInfo +func (*DeleteContentRequest) ProtoMessage() {} + +func (x *DeleteContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteContentRequest.ProtoReflect.Descriptor instead. +func (*DeleteContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteContentRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. type ReadContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest is the hash identity to read. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. This uses // standard zero-indexed semantics. Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` // size is the total size of the read. If zero, the entire blob will be // returned by the service. 
- Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` } -func (m *ReadContentRequest) Reset() { *m = ReadContentRequest{} } -func (*ReadContentRequest) ProtoMessage() {} -func (*ReadContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{8} -} -func (m *ReadContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ReadContentRequest) Reset() { + *x = ReadContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ReadContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentRequest.Merge(m, src) -} -func (m *ReadContentRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentRequest.DiscardUnknown(m) + +func (x *ReadContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ReadContentRequest proto.InternalMessageInfo +func (*ReadContentRequest) ProtoMessage() {} + +func (x *ReadContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadContentRequest.ProtoReflect.Descriptor instead. +func (*ReadContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{8} +} + +func (x *ReadContentRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *ReadContentRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadContentRequest) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} // ReadContentResponse carries byte data for a read request. 
type ReadContentResponse struct { - Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` // offset of the returned data + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // actual data } -func (m *ReadContentResponse) Reset() { *m = ReadContentResponse{} } -func (*ReadContentResponse) ProtoMessage() {} -func (*ReadContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{9} -} -func (m *ReadContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ReadContentResponse) Reset() { + *x = ReadContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ReadContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentResponse.Merge(m, src) -} -func (m *ReadContentResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentResponse.DiscardUnknown(m) + +func (x *ReadContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ReadContentResponse proto.InternalMessageInfo +func (*ReadContentResponse) ProtoMessage() {} + +func (x *ReadContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadContentResponse.ProtoReflect.Descriptor instead. 
+func (*ReadContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{9} +} + +func (x *ReadContentResponse) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadContentResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} type Status struct { - StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` - UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` - Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartedAt *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Expected string `protobuf:"bytes,6,opt,name=expected,proto3" json:"expected,omitempty"` } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{10} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Status proto.InternalMessageInfo +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{10} +} + +func (x *Status) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt + } + return nil +} + +func (x *Status) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Status) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *Status) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *Status) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *Status) GetExpected() string { + if x != nil { + return x.Expected + } + return "" +} type StatusRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` } -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{11} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{11} +} + +func (x *StatusRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} type StatusResponse struct { - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{12} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) + +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
+func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{12} +} + +func (x *StatusResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} type ListStatusesRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListStatusesRequest) Reset() { *m = ListStatusesRequest{} } -func (*ListStatusesRequest) ProtoMessage() {} -func (*ListStatusesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{13} -} -func (m *ListStatusesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListStatusesRequest) Reset() { + *x = ListStatusesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListStatusesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesRequest.Merge(m, src) -} -func (m *ListStatusesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListStatusesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesRequest.DiscardUnknown(m) + +func (x *ListStatusesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListStatusesRequest proto.InternalMessageInfo +func (*ListStatusesRequest) ProtoMessage() {} + +func (x *ListStatusesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStatusesRequest.ProtoReflect.Descriptor instead. 
+func (*ListStatusesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{13} +} + +func (x *ListStatusesRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListStatusesResponse struct { - Statuses []Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statuses []*Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"` } -func (m *ListStatusesResponse) Reset() { *m = ListStatusesResponse{} } -func (*ListStatusesResponse) ProtoMessage() {} -func (*ListStatusesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{14} -} -func (m *ListStatusesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListStatusesResponse) Reset() { + *x = ListStatusesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListStatusesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesResponse.Merge(m, src) -} -func (m *ListStatusesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListStatusesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesResponse.DiscardUnknown(m) + +func (x *ListStatusesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListStatusesResponse proto.InternalMessageInfo +func (*ListStatusesResponse) ProtoMessage() {} + +func (x *ListStatusesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStatusesResponse.ProtoReflect.Descriptor instead. +func (*ListStatusesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{14} +} + +func (x *ListStatusesResponse) GetStatuses() []*Status { + if x != nil { + return x.Statuses + } + return nil +} // WriteContentRequest writes data to the request ref at offset. type WriteContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Action sets the behavior of the write. // // When this is a write and the ref is not yet allocated, the ref will be @@ -744,7 +984,7 @@ type WriteContentRequest struct { // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. 
- Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` + Expected string `protobuf:"bytes,4,opt,name=expected,proto3" json:"expected,omitempty"` // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the // file. This uses standard, zero-indexed semantics. @@ -763,46 +1003,96 @@ type WriteContentRequest struct { // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *WriteContentRequest) Reset() { *m = WriteContentRequest{} } -func (*WriteContentRequest) ProtoMessage() {} -func (*WriteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{15} -} -func (m *WriteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *WriteContentRequest) Reset() { + *x = WriteContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *WriteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentRequest.Merge(m, src) -} -func (m *WriteContentRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentRequest.DiscardUnknown(m) + +func (x *WriteContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_WriteContentRequest proto.InternalMessageInfo +func (*WriteContentRequest) ProtoMessage() {} + +func (x *WriteContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteContentRequest.ProtoReflect.Descriptor instead. 
+func (*WriteContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{15} +} + +func (x *WriteContentRequest) GetAction() WriteAction { + if x != nil { + return x.Action + } + return WriteAction_STAT +} + +func (x *WriteContentRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *WriteContentRequest) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *WriteContentRequest) GetExpected() string { + if x != nil { + return x.Expected + } + return "" +} + +func (x *WriteContentRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *WriteContentRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *WriteContentRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} // WriteContentResponse is returned on the culmination of a write call. type WriteContentResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Action contains the action for the final message of the stream. A writer // should confirm that they match the intended result. Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` @@ -810,12 +1100,12 @@ type WriteContentResponse struct { // // This must be set for stat and commit write actions. All other write // actions may omit this. - StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` // UpdatedAt provides the last time of a successful write. // // This must be set for stat and commit write actions. All other write // actions may omit this. - UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Offset is the current committed size for the write. Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` // Total provides the current, expected total size of the write. @@ -828,4598 +1118,671 @@ type WriteContentResponse struct { // Digest, if present, includes the digest up to the currently committed // bytes. If action is commit, this field will be set. It is implementation // defined if this is set for other actions. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Digest string `protobuf:"bytes,6,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *WriteContentResponse) Reset() { *m = WriteContentResponse{} } -func (*WriteContentResponse) ProtoMessage() {} -func (*WriteContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{16} -} -func (m *WriteContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *WriteContentResponse) Reset() { + *x = WriteContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *WriteContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentResponse.Merge(m, src) -} -func (m *WriteContentResponse) XXX_Size() int { - return m.Size() -} -func (m *WriteContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentResponse.DiscardUnknown(m) + +func (x *WriteContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_WriteContentResponse proto.InternalMessageInfo +func (*WriteContentResponse) ProtoMessage() {} + +func (x *WriteContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteContentResponse.ProtoReflect.Descriptor instead. 
+func (*WriteContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{16} +} + +func (x *WriteContentResponse) GetAction() WriteAction { + if x != nil { + return x.Action + } + return WriteAction_STAT +} + +func (x *WriteContentResponse) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt + } + return nil +} + +func (x *WriteContentResponse) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *WriteContentResponse) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *WriteContentResponse) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *WriteContentResponse) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} type AbortRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` +} + +func (x *AbortRequest) Reset() { + *x = AbortRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AbortRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AbortRequest) Reset() { *m = AbortRequest{} } func (*AbortRequest) ProtoMessage() {} + +func (x *AbortRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AbortRequest.ProtoReflect.Descriptor instead. 
func (*AbortRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{17} -} -func (m *AbortRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AbortRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AbortRequest.Merge(m, src) -} -func (m *AbortRequest) XXX_Size() int { - return m.Size() -} -func (m *AbortRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AbortRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AbortRequest proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value) - proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.Info.LabelsEntry") - proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest") - proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse") - proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest") - proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse") - proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest") - proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse") - proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest") - proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest") - proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse") - proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status") - proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse") - proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest") - proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse") - proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.WriteContentRequest.LabelsEntry") - proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse") - proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptor_468430ba3e400391) -} - -var fileDescriptor_468430ba3e400391 = []byte{ - // 1081 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, - 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, - 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, - 0x24, 0x14, 0xf5, 
0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, - 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, - 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, - 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, - 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, - 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, - 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, - 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, - 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, - 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, - 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, - 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, - 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, - 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, - 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, - 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, - 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, - 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, - 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, - 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4, - 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, - 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, - 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, - 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, - 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, - 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, - 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, - 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, - 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, - 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, - 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, - 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, - 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, - 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, - 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, - 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, - 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 
0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, - 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, - 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, - 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, - 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, - 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, - 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, - 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e, - 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, - 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, - 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, - 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, - 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, - 0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, - 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, - 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, - 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, - 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, - 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, - 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19, - 0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, - 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, - 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, - 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, - 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, - 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, - 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ContentClient is the client API for Content service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ContentClient interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) - // Update updates content metadata. 
- // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) - // Delete will delete the referenced object. - Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. - Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) - // Status returns the status for a single reference. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. - // - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. - // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. - // - // If expected_digest is set and already part of the content store, the - // write will fail. - // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. - Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type contentClient struct { - cc *grpc.ClientConn -} - -func NewContentClient(cc *grpc.ClientConn) ContentClient { - return &contentClient{cc} -} - -func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { - out := new(InfoResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { - out := new(UpdateResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) - if err != nil { - return nil, err - } - x := &contentListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ListClient interface { - Recv() (*ListContentResponse, error) - grpc.ClientStream -} - -type contentListClient struct { - grpc.ClientStream -} - -func (x *contentListClient) Recv() (*ListContentResponse, error) { - m := new(ListContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) - if err != nil { - return nil, err - } - x := &contentReadClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ReadClient interface { - Recv() (*ReadContentResponse, error) - grpc.ClientStream -} - -type contentReadClient struct { - grpc.ClientStream -} - -func (x *contentReadClient) Recv() (*ReadContentResponse, error) { - m := new(ReadContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { - out := new(ListStatusesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) 
- if err != nil { - return nil, err - } - x := &contentWriteClient{stream} - return x, nil -} - -type Content_WriteClient interface { - Send(*WriteContentRequest) error - Recv() (*WriteContentResponse, error) - grpc.ClientStream -} - -type contentWriteClient struct { - grpc.ClientStream -} - -func (x *contentWriteClient) Send(m *WriteContentRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { - m := new(WriteContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ContentServer is the server API for Content service. -type ContentServer interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(context.Context, *InfoRequest) (*InfoResponse, error) - // Update updates content metadata. - // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(context.Context, *UpdateRequest) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(*ListContentRequest, Content_ListServer) error - // Delete will delete the referenced object. - Delete(context.Context, *DeleteContentRequest) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. - Read(*ReadContentRequest, Content_ReadServer) error - // Status returns the status for a single reference. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. - // - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. - // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. - // - // If expected_digest is set and already part of the content store, the - // write will fail. - // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(Content_WriteServer) error - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. 
- Abort(context.Context, *AbortRequest) (*types.Empty, error) -} - -// UnimplementedContentServer can be embedded to have forward compatible implementations. -type UnimplementedContentServer struct { -} - -func (*UnimplementedContentServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") -} -func (*UnimplementedContentServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedContentServer) List(req *ListContentRequest, srv Content_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedContentServer) Delete(ctx context.Context, req *DeleteContentRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedContentServer) Read(req *ReadContentRequest, srv Content_ReadServer) error { - return status.Errorf(codes.Unimplemented, "method Read not implemented") -} -func (*UnimplementedContentServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedContentServer) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") -} -func (*UnimplementedContentServer) Write(srv Content_WriteServer) error { - return status.Errorf(codes.Unimplemented, "method Write not implemented") -} -func (*UnimplementedContentServer) Abort(ctx context.Context, req *AbortRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") -} - -func RegisterContentServer(s *grpc.Server, srv ContentServer) { - s.RegisterService(&_Content_serviceDesc, srv) -} - -func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).List(m, &contentListServer{stream}) -} - -type Content_ListServer interface { - 
Send(*ListContentResponse) error - grpc.ServerStream -} - -type contentListServer struct { - grpc.ServerStream -} - -func (x *contentListServer) Send(m *ListContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteContentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).Read(m, &contentReadServer{stream}) -} - -type Content_ReadServer interface { - Send(*ReadContentResponse) error - grpc.ServerStream -} - -type contentReadServer struct { - grpc.ServerStream -} - -func (x *contentReadServer) Send(m *ReadContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListStatusesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).ListStatuses(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/ListStatuses", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ContentServer).Write(&contentWriteServer{stream}) -} - -type Content_WriteServer interface { - Send(*WriteContentResponse) error - Recv() (*WriteContentRequest, error) - grpc.ServerStream -} - -type contentWriteServer struct { - grpc.ServerStream -} - -func (x *contentWriteServer) Send(m *WriteContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { - m := new(WriteContentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AbortRequest) - if err := dec(in); err != nil { - return nil, err 
- } - if interceptor == nil { - return srv.(ContentServer).Abort(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Abort", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Content_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.content.v1.Content", - HandlerType: (*ContentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Info", - Handler: _Content_Info_Handler, - }, - { - MethodName: "Update", - Handler: _Content_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Content_Delete_Handler, - }, - { - MethodName: "Status", - Handler: _Content_Status_Handler, - }, - { - MethodName: "ListStatuses", - Handler: _Content_ListStatuses_Handler, - }, - { - MethodName: "Abort", - Handler: _Content_Abort_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "List", - Handler: _Content_List_Handler, - ServerStreams: true, - }, - { - StreamName: "Read", - Handler: _Content_Read_Handler, - ServerStreams: true, - }, - { - StreamName: "Write", - Handler: _Content_Write_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", -} - -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintContent(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintContent(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x18 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x1a - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintContent(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintContent(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*ListStatusesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x32 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x28 - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x22 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x18 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x12 - } - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, 
nil -} - -func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintContent(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x1a - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintContent(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x12 - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AbortRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AbortRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContent(dAtA []byte, offset int, v uint64) int { - offset -= sovContent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AbortRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContent(x uint64) (n int) { - return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Info) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Info{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - 
}, "") - return s -} -func (this *InfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *InfoResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateRequest{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListContentRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForInfo := "[]Info{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," - } - repeatedStringForInfo += "}" - s := strings.Join([]string{`&ListContentResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ReadContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ReadContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentResponse{`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Status{`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusResponse{`, - `Status:` + strings.Replace(this.Status.String(), "Status", "Status", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListStatusesRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStatuses := "[]Status{" - for _, f := range this.Statuses { - repeatedStringForStatuses += strings.Replace(strings.Replace(f.String(), "Status", "Status", 1), `&`, ``, 1) + "," - } - repeatedStringForStatuses += "}" - s := strings.Join([]string{`&ListStatusesResponse{`, - `Statuses:` + repeatedStringForStatuses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&WriteContentRequest{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteContentResponse{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} 
-func (this *AbortRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AbortRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= 
int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, Status{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AbortRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipContent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContent - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{17} +} + +func (x *AbortRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +var File_github_com_containerd_containerd_api_services_content_v1_content_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, + 0x48, 0x0a, 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0x4a, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x2e, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 
0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x4f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, + 0x2e, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, + 0x58, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x61, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x50, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x2f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, + 0x5a, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xde, 0x02, 0x0a, 0x13, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x57, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x02, 0x0a, + 0x14, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 
0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x2a, 0x2e, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x41, 0x54, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x71, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 
0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x34, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x71, 0x0a, 0x04, 0x52, + 0x65, 0x61, 0x64, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x67, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x76, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 
0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x05, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = []interface{}{ + (WriteAction)(0), // 0: containerd.services.content.v1.WriteAction + (*Info)(nil), // 1: containerd.services.content.v1.Info + (*InfoRequest)(nil), // 2: containerd.services.content.v1.InfoRequest + (*InfoResponse)(nil), // 3: containerd.services.content.v1.InfoResponse + (*UpdateRequest)(nil), // 4: containerd.services.content.v1.UpdateRequest + (*UpdateResponse)(nil), // 5: containerd.services.content.v1.UpdateResponse + (*ListContentRequest)(nil), // 6: containerd.services.content.v1.ListContentRequest + (*ListContentResponse)(nil), // 7: containerd.services.content.v1.ListContentResponse + (*DeleteContentRequest)(nil), // 8: containerd.services.content.v1.DeleteContentRequest + (*ReadContentRequest)(nil), // 9: containerd.services.content.v1.ReadContentRequest + 
(*ReadContentResponse)(nil), // 10: containerd.services.content.v1.ReadContentResponse + (*Status)(nil), // 11: containerd.services.content.v1.Status + (*StatusRequest)(nil), // 12: containerd.services.content.v1.StatusRequest + (*StatusResponse)(nil), // 13: containerd.services.content.v1.StatusResponse + (*ListStatusesRequest)(nil), // 14: containerd.services.content.v1.ListStatusesRequest + (*ListStatusesResponse)(nil), // 15: containerd.services.content.v1.ListStatusesResponse + (*WriteContentRequest)(nil), // 16: containerd.services.content.v1.WriteContentRequest + (*WriteContentResponse)(nil), // 17: containerd.services.content.v1.WriteContentResponse + (*AbortRequest)(nil), // 18: containerd.services.content.v1.AbortRequest + nil, // 19: containerd.services.content.v1.Info.LabelsEntry + nil, // 20: containerd.services.content.v1.WriteContentRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 22: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 23: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = []int32{ + 21, // 0: containerd.services.content.v1.Info.created_at:type_name -> google.protobuf.Timestamp + 21, // 1: containerd.services.content.v1.Info.updated_at:type_name -> google.protobuf.Timestamp + 19, // 2: containerd.services.content.v1.Info.labels:type_name -> containerd.services.content.v1.Info.LabelsEntry + 1, // 3: containerd.services.content.v1.InfoResponse.info:type_name -> containerd.services.content.v1.Info + 1, // 4: containerd.services.content.v1.UpdateRequest.info:type_name -> containerd.services.content.v1.Info + 22, // 5: containerd.services.content.v1.UpdateRequest.update_mask:type_name -> google.protobuf.FieldMask + 1, // 6: containerd.services.content.v1.UpdateResponse.info:type_name -> containerd.services.content.v1.Info + 1, // 7: containerd.services.content.v1.ListContentResponse.info:type_name -> containerd.services.content.v1.Info + 21, // 8: containerd.services.content.v1.Status.started_at:type_name -> google.protobuf.Timestamp + 21, // 9: containerd.services.content.v1.Status.updated_at:type_name -> google.protobuf.Timestamp + 11, // 10: containerd.services.content.v1.StatusResponse.status:type_name -> containerd.services.content.v1.Status + 11, // 11: containerd.services.content.v1.ListStatusesResponse.statuses:type_name -> containerd.services.content.v1.Status + 0, // 12: containerd.services.content.v1.WriteContentRequest.action:type_name -> containerd.services.content.v1.WriteAction + 20, // 13: containerd.services.content.v1.WriteContentRequest.labels:type_name -> containerd.services.content.v1.WriteContentRequest.LabelsEntry + 0, // 14: containerd.services.content.v1.WriteContentResponse.action:type_name -> containerd.services.content.v1.WriteAction + 21, // 15: containerd.services.content.v1.WriteContentResponse.started_at:type_name -> google.protobuf.Timestamp + 21, // 16: containerd.services.content.v1.WriteContentResponse.updated_at:type_name -> google.protobuf.Timestamp + 2, // 17: containerd.services.content.v1.Content.Info:input_type -> containerd.services.content.v1.InfoRequest + 4, // 18: containerd.services.content.v1.Content.Update:input_type -> containerd.services.content.v1.UpdateRequest + 6, // 19: containerd.services.content.v1.Content.List:input_type -> containerd.services.content.v1.ListContentRequest + 8, // 20: containerd.services.content.v1.Content.Delete:input_type -> 
containerd.services.content.v1.DeleteContentRequest + 9, // 21: containerd.services.content.v1.Content.Read:input_type -> containerd.services.content.v1.ReadContentRequest + 12, // 22: containerd.services.content.v1.Content.Status:input_type -> containerd.services.content.v1.StatusRequest + 14, // 23: containerd.services.content.v1.Content.ListStatuses:input_type -> containerd.services.content.v1.ListStatusesRequest + 16, // 24: containerd.services.content.v1.Content.Write:input_type -> containerd.services.content.v1.WriteContentRequest + 18, // 25: containerd.services.content.v1.Content.Abort:input_type -> containerd.services.content.v1.AbortRequest + 3, // 26: containerd.services.content.v1.Content.Info:output_type -> containerd.services.content.v1.InfoResponse + 5, // 27: containerd.services.content.v1.Content.Update:output_type -> containerd.services.content.v1.UpdateResponse + 7, // 28: containerd.services.content.v1.Content.List:output_type -> containerd.services.content.v1.ListContentResponse + 23, // 29: containerd.services.content.v1.Content.Delete:output_type -> google.protobuf.Empty + 10, // 30: containerd.services.content.v1.Content.Read:output_type -> containerd.services.content.v1.ReadContentResponse + 13, // 31: containerd.services.content.v1.Content.Status:output_type -> containerd.services.content.v1.StatusResponse + 15, // 32: containerd.services.content.v1.Content.ListStatuses:output_type -> containerd.services.content.v1.ListStatusesResponse + 17, // 33: containerd.services.content.v1.Content.Write:output_type -> containerd.services.content.v1.WriteContentResponse + 23, // 34: containerd.services.content.v1.Content.Abort:output_type -> google.protobuf.Empty + 26, // [26:35] is the sub-list for method output_type + 17, // [17:26] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() } +func file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() { + if File_github_com_containerd_containerd_api_services_content_v1_content_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStatusesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStatusesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*WriteContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AbortRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc, + NumEnums: 1, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_content_v1_content_proto = out.File + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto index f43b6497a9..8aea0636b8 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.content.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; @@ -92,16 +91,16 @@ service Content { message Info { // Digest is the hash identity of the blob. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; // Size is the total number of bytes in the blob. int64 size = 2; // CreatedAt provides the time at which the blob was committed. - google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 3; // UpdatedAt provides the time the info was last updated. - google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 4; // Labels are arbitrary data on snapshots. 
// @@ -110,15 +109,15 @@ message Info { } message InfoRequest { - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; } message InfoResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message UpdateRequest { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. @@ -130,7 +129,7 @@ message UpdateRequest { } message UpdateResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message ListContentRequest { @@ -148,19 +147,19 @@ message ListContentRequest { } message ListContentResponse { - repeated Info info = 1 [(gogoproto.nullable) = false]; + repeated Info info = 1; } message DeleteContentRequest { // Digest specifies which content to delete. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; } // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. message ReadContentRequest { // Digest is the hash identity to read. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. This uses @@ -179,12 +178,12 @@ message ReadContentResponse { } message Status { - google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started_at = 1; + google.protobuf.Timestamp updated_at = 2; string ref = 3; int64 offset = 4; int64 total = 5; - string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string expected = 6; } @@ -201,17 +200,14 @@ message ListStatusesRequest { } message ListStatusesResponse { - repeated Status statuses = 1 [(gogoproto.nullable) = false]; + repeated Status statuses = 1; } // WriteAction defines the behavior of a WriteRequest. enum WriteAction { - option (gogoproto.goproto_enum_prefix) = false; - option (gogoproto.enum_customname) = "WriteAction"; - // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. - STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"]; + STAT = 0; // WriteActionWrite sets the action for the write request to write data. // @@ -219,7 +215,7 @@ enum WriteAction { // transaction will be left open for further writes. // // This is the default. - WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"]; + WRITE = 1; // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. @@ -228,7 +224,7 @@ enum WriteAction { // commit it. // // This action will always terminate the write. - COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"]; + COMMIT = 2; } // WriteContentRequest writes data to the request ref at offset. @@ -269,7 +265,7 @@ message WriteContentRequest { // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. 
- string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string expected = 4; // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the @@ -304,13 +300,13 @@ message WriteContentResponse { // // This must be set for stat and commit write actions. All other write // actions may omit this. - google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started_at = 2; // UpdatedAt provides the last time of a successful write. // // This must be set for stat and commit write actions. All other write // actions may omit this. - google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 3; // Offset is the current committed size for the write. int64 offset = 4; @@ -326,7 +322,7 @@ message WriteContentResponse { // Digest, if present, includes the digest up to the currently committed // bytes. If action is commit, this field will be set. It is implementation // defined if this is set for other actions. - string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 6; } message AbortRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go new file mode 100644 index 0000000000..e230c45a67 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go @@ -0,0 +1,569 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/content/v1/content.proto + +package content + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ContentClient is the client API for Content service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ContentClient interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. 
+ List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) + // Delete will delete the referenced object. + Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) + // Status returns the status for a single reference. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type contentClient struct { + cc grpc.ClientConnInterface +} + +func NewContentClient(cc grpc.ClientConnInterface) ContentClient { + return &contentClient{cc} +} + +func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { + out := new(UpdateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) 
+ if err != nil { + return nil, err + } + x := &contentListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ListClient interface { + Recv() (*ListContentResponse, error) + grpc.ClientStream +} + +type contentListClient struct { + grpc.ClientStream +} + +func (x *contentListClient) Recv() (*ListContentResponse, error) { + m := new(ListContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) + if err != nil { + return nil, err + } + x := &contentReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ReadClient interface { + Recv() (*ReadContentResponse, error) + grpc.ClientStream +} + +type contentReadClient struct { + grpc.ClientStream +} + +func (x *contentReadClient) Recv() (*ReadContentResponse, error) { + m := new(ReadContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { + out := new(ListStatusesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) + if err != nil { + return nil, err + } + x := &contentWriteClient{stream} + return x, nil +} + +type Content_WriteClient interface { + Send(*WriteContentRequest) error + Recv() (*WriteContentResponse, error) + grpc.ClientStream +} + +type contentWriteClient struct { + grpc.ClientStream +} + +func (x *contentWriteClient) Send(m *WriteContentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { + m := new(WriteContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ContentServer is the server API for Content service. +// All implementations must embed UnimplementedContentServer +// for forward compatibility +type ContentServer interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(*ListContentRequest, Content_ListServer) error + // Delete will delete the referenced object. + Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(*ReadContentRequest, Content_ReadServer) error + // Status returns the status for a single reference. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(Content_WriteServer) error + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(context.Context, *AbortRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedContentServer() +} + +// UnimplementedContentServer must be embedded to have forward compatible implementations. 
+type UnimplementedContentServer struct { +} + +func (UnimplementedContentServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (UnimplementedContentServer) Update(context.Context, *UpdateRequest) (*UpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedContentServer) List(*ListContentRequest, Content_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedContentServer) Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedContentServer) Read(*ReadContentRequest, Content_ReadServer) error { + return status.Errorf(codes.Unimplemented, "method Read not implemented") +} +func (UnimplementedContentServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedContentServer) ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") +} +func (UnimplementedContentServer) Write(Content_WriteServer) error { + return status.Errorf(codes.Unimplemented, "method Write not implemented") +} +func (UnimplementedContentServer) Abort(context.Context, *AbortRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") +} +func (UnimplementedContentServer) mustEmbedUnimplementedContentServer() {} + +// UnsafeContentServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ContentServer will +// result in compilation errors. 
+type UnsafeContentServer interface { + mustEmbedUnimplementedContentServer() +} + +func RegisterContentServer(s grpc.ServiceRegistrar, srv ContentServer) { + s.RegisterService(&Content_ServiceDesc, srv) +} + +func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).List(m, &contentListServer{stream}) +} + +type Content_ListServer interface { + Send(*ListContentResponse) error + grpc.ServerStream +} + +type contentListServer struct { + grpc.ServerStream +} + +func (x *contentListServer) Send(m *ListContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).Read(m, &contentReadServer{stream}) +} + +type Content_ReadServer interface { + Send(*ReadContentResponse) error + grpc.ServerStream +} + +type contentReadServer struct { + grpc.ServerStream +} + +func (x *contentReadServer) Send(m *ReadContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Status", + } + handler 
:= func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListStatusesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).ListStatuses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/ListStatuses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ContentServer).Write(&contentWriteServer{stream}) +} + +type Content_WriteServer interface { + Send(*WriteContentResponse) error + Recv() (*WriteContentRequest, error) + grpc.ServerStream +} + +type contentWriteServer struct { + grpc.ServerStream +} + +func (x *contentWriteServer) Send(m *WriteContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { + m := new(WriteContentRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AbortRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Abort(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Abort", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Content_ServiceDesc is the grpc.ServiceDesc for Content service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Content_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.content.v1.Content", + HandlerType: (*ContentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Content_Info_Handler, + }, + { + MethodName: "Update", + Handler: _Content_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Content_Delete_Handler, + }, + { + MethodName: "Status", + Handler: _Content_Status_Handler, + }, + { + MethodName: "ListStatuses", + Handler: _Content_ListStatuses_Handler, + }, + { + MethodName: "Abort", + Handler: _Content_Abort_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Content_List_Handler, + ServerStreams: true, + }, + { + StreamName: "Read", + Handler: _Content_Read_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _Content_Write_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index b1450ceb82..54df8b56d9 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -1,121 +1,162 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/diff/v1/diff.proto package diff import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ApplyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*anypb.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } -func (*ApplyRequest) ProtoMessage() {} -func (*ApplyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{0} -} -func (m *ApplyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ApplyRequest) Reset() { + *x = ApplyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ApplyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyRequest.Merge(m, src) -} -func (m *ApplyRequest) XXX_Size() int { - return m.Size() -} -func (m *ApplyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyRequest.DiscardUnknown(m) + +func (x *ApplyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ApplyRequest proto.InternalMessageInfo +func (*ApplyRequest) ProtoMessage() {} + +func (x *ApplyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{0} +} + +func (x *ApplyRequest) GetDiff() *types.Descriptor { + if x != nil { + return x.Diff + } + return nil +} + +func (x *ApplyRequest) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil +} + +func (x *ApplyRequest) GetPayloads() map[string]*anypb.Any { + if x != nil { + return x.Payloads + } + return nil +} type ApplyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Applied is the descriptor for the object which was applied. // If the input was a compressed blob then the result will be // the descriptor for the uncompressed blob. - Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"` } -func (m *ApplyResponse) Reset() { *m = ApplyResponse{} } -func (*ApplyResponse) ProtoMessage() {} -func (*ApplyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{1} -} -func (m *ApplyResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ApplyResponse) Reset() { + *x = ApplyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ApplyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResponse.Merge(m, src) -} -func (m *ApplyResponse) XXX_Size() int { - return m.Size() -} -func (m *ApplyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyResponse.DiscardUnknown(m) + +func (x *ApplyResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ApplyResponse proto.InternalMessageInfo +func (*ApplyResponse) ProtoMessage() {} + +func (x *ApplyResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead. +func (*ApplyResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{1} +} + +func (x *ApplyResponse) GetApplied() *types.Descriptor { + if x != nil { + return x.Applied + } + return nil +} type DiffRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Left are the mounts which represent the older copy // in which is the base of the computed changes. 
Left []*types.Mount `protobuf:"bytes,1,rep,name=left,proto3" json:"left,omitempty"` @@ -130,1537 +171,341 @@ type DiffRequest struct { Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"` // Labels are the labels to apply to the generated content // on content store commit. - Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility. + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (m *DiffRequest) Reset() { *m = DiffRequest{} } -func (*DiffRequest) ProtoMessage() {} -func (*DiffRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{2} -} -func (m *DiffRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DiffRequest) Reset() { + *x = DiffRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DiffRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiffRequest.Merge(m, src) -} -func (m *DiffRequest) XXX_Size() int { - return m.Size() -} -func (m *DiffRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DiffRequest.DiscardUnknown(m) + +func (x *DiffRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DiffRequest proto.InternalMessageInfo +func (*DiffRequest) ProtoMessage() {} + +func (x *DiffRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiffRequest.ProtoReflect.Descriptor instead. 
+func (*DiffRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{2} +} + +func (x *DiffRequest) GetLeft() []*types.Mount { + if x != nil { + return x.Left + } + return nil +} + +func (x *DiffRequest) GetRight() []*types.Mount { + if x != nil { + return x.Right + } + return nil +} + +func (x *DiffRequest) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *DiffRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +func (x *DiffRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *DiffRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch + } + return nil +} type DiffResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Diff is the descriptor of the diff which can be applied - Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"` +} + +func (x *DiffResponse) Reset() { + *x = DiffResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiffResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DiffResponse) Reset() { *m = DiffResponse{} } func (*DiffResponse) ProtoMessage() {} + +func (x *DiffResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiffResponse.ProtoReflect.Descriptor instead. 
func (*DiffResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{3} -} -func (m *DiffResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DiffResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiffResponse.Merge(m, src) -} -func (m *DiffResponse) XXX_Size() int { - return m.Size() -} -func (m *DiffResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DiffResponse.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{3} } -var xxx_messageInfo_DiffResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") - proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") - proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") - proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") - proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptor_3b36a99e6faaa935) -} - -var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 526 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, - 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, - 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, - 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, - 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9, - 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, - 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, - 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae, - 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, - 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, - 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, - 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, - 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, - 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, - 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, - 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, - 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, - 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, - 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 
0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, - 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, - 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, - 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, - 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, - 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, - 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, - 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, - 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, - 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, - 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, - 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, - 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, - 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, - 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DiffClient is the client API for Diff service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DiffClient interface { - // Apply applies the content associated with the provided digests onto - // the provided mounts. Archive content will be extracted and - // decompressed if necessary. - Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) - // Diff creates a diff between the given mounts and uploads the result - // to the content store. - Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) -} - -type diffClient struct { - cc *grpc.ClientConn -} - -func NewDiffClient(cc *grpc.ClientConn) DiffClient { - return &diffClient{cc} -} - -func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { - out := new(ApplyResponse) - err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) { - out := new(DiffResponse) - err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DiffServer is the server API for Diff service. -type DiffServer interface { - // Apply applies the content associated with the provided digests onto - // the provided mounts. Archive content will be extracted and - // decompressed if necessary. 
- Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) - // Diff creates a diff between the given mounts and uploads the result - // to the content store. - Diff(context.Context, *DiffRequest) (*DiffResponse, error) -} - -// UnimplementedDiffServer can be embedded to have forward compatible implementations. -type UnimplementedDiffServer struct { -} - -func (*UnimplementedDiffServer) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") -} -func (*UnimplementedDiffServer) Diff(ctx context.Context, req *DiffRequest) (*DiffResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Diff not implemented") -} - -func RegisterDiffServer(s *grpc.Server, srv DiffServer) { - s.RegisterService(&_Diff_serviceDesc, srv) -} - -func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ApplyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DiffServer).Apply(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.diff.v1.Diff/Apply", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DiffRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DiffServer).Diff(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.diff.v1.Diff/Diff", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DiffServer).Diff(ctx, req.(*DiffRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Diff_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.diff.v1.Diff", - HandlerType: (*DiffServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Apply", - Handler: _Diff_Apply_Handler, - }, - { - MethodName: "Diff", - Handler: _Diff_Diff_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto", -} - -func (m *ApplyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ApplyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Payloads) > 0 { - for k := range m.Payloads { - v := m.Payloads[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDiff(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDiff(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Mounts) > 0 { - for iNdEx 
:= len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Diff != nil { - { - size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ApplyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ApplyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Applied != nil { - { - size, err := m.Applied.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DiffRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DiffRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintDiff(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDiff(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDiff(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x22 - } - if len(m.MediaType) > 0 { - i -= len(m.MediaType) - copy(dAtA[i:], m.MediaType) - i = encodeVarintDiff(dAtA, i, uint64(len(m.MediaType))) - i-- - dAtA[i] = 0x1a - } - if len(m.Right) > 0 { - for iNdEx := len(m.Right) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Right[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Left) > 0 { - for iNdEx := len(m.Left) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Left[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DiffResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DiffResponse) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Diff != nil { - { - size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} - -func encodeVarintDiff(dAtA []byte, offset int, v uint64) int { - offset -= sovDiff(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ApplyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Diff != nil { - l = m.Diff.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - if len(m.Payloads) > 0 { - for k, v := range m.Payloads { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovDiff(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l - n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ApplyResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Applied != nil { - l = m.Applied.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DiffRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Left) > 0 { - for _, e := range m.Left { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - if len(m.Right) > 0 { - for _, e := range m.Right { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - l = len(m.MediaType) - if l > 0 { - n += 1 + l + sovDiff(uint64(l)) - } - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovDiff(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + 1 + len(v) + sovDiff(uint64(len(v))) - n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DiffResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Diff != nil { - l = m.Diff.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovDiff(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDiff(x uint64) (n int) { - return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ApplyRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - keysForPayloads := make([]string, 0, len(this.Payloads)) - for k, _ := range this.Payloads { - keysForPayloads = append(keysForPayloads, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) - mapStringForPayloads := "map[string]*types1.Any{" - for _, k := range keysForPayloads { - mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) - } - mapStringForPayloads += "}" - s := strings.Join([]string{`&ApplyRequest{`, - `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", 
"types.Descriptor", 1) + `,`, - `Mounts:` + repeatedStringForMounts + `,`, - `Payloads:` + mapStringForPayloads + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ApplyResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ApplyResponse{`, - `Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "types.Descriptor", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DiffRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForLeft := "[]*Mount{" - for _, f := range this.Left { - repeatedStringForLeft += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForLeft += "}" - repeatedStringForRight := "[]*Mount{" - for _, f := range this.Right { - repeatedStringForRight += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRight += "}" - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&DiffRequest{`, - `Left:` + repeatedStringForLeft + `,`, - `Right:` + repeatedStringForRight + `,`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DiffResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DiffResponse{`, - `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringDiff(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ApplyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - 
} - if m.Diff == nil { - m.Diff = &types.Descriptor{} - } - if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Payloads == nil { - m.Payloads = make(map[string]*types1.Any) - } - var mapkey string - var mapvalue *types1.Any - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthDiff - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthDiff - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &types1.Any{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Payloads[mapkey] = mapvalue - iNdEx = postIndex - default: - 
iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *DiffResponse) GetDiff() *types.Descriptor { + if x != nil { + return x.Diff } return nil } -func (m *ApplyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Applied", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Applied == nil { - m.Applied = &types.Descriptor{} - } - if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DiffRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiffRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Left = append(m.Left, &types.Mount{}) - if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Right = append(m.Right, &types.Mount{}) - if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_services_diff_v1_diff_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DiffResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiffResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Diff == nil { - m.Diff = &types.Descriptor{} - } - if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDiff(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDiff - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDiff - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDiff - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x64, 0x69, 0x66, 0x66, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x1b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x99, 0x02, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x30, 0x0a, 0x04, 0x64, 0x69, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 
0x6f, 0x72, 0x52, 0x04, 0x64, + 0x69, 0x66, 0x66, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, + 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0d, 0x50, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x0d, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, + 0x07, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x64, 0x22, 0xeb, 0x02, 0x0a, 0x0b, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6c, 0x65, + 0x66, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x05, 0x72, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, + 0x65, 0x66, 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 
0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, + 0x61, 0x74, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x40, 0x0a, 0x0c, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x64, 0x69, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, + 0x04, 0x64, 0x69, 0x66, 0x66, 0x32, 0xc3, 0x01, 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x5e, + 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, + 0x66, 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, + 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, + 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3c, 0x5a, 0x3a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, 0x66, + 0x66, 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x69, 0x66, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDiff = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData = file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData +} + +var 
file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes = []interface{}{ + (*ApplyRequest)(nil), // 0: containerd.services.diff.v1.ApplyRequest + (*ApplyResponse)(nil), // 1: containerd.services.diff.v1.ApplyResponse + (*DiffRequest)(nil), // 2: containerd.services.diff.v1.DiffRequest + (*DiffResponse)(nil), // 3: containerd.services.diff.v1.DiffResponse + nil, // 4: containerd.services.diff.v1.ApplyRequest.PayloadsEntry + nil, // 5: containerd.services.diff.v1.DiffRequest.LabelsEntry + (*types.Descriptor)(nil), // 6: containerd.types.Descriptor + (*types.Mount)(nil), // 7: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*anypb.Any)(nil), // 9: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs = []int32{ + 6, // 0: containerd.services.diff.v1.ApplyRequest.diff:type_name -> containerd.types.Descriptor + 7, // 1: containerd.services.diff.v1.ApplyRequest.mounts:type_name -> containerd.types.Mount + 4, // 2: containerd.services.diff.v1.ApplyRequest.payloads:type_name -> containerd.services.diff.v1.ApplyRequest.PayloadsEntry + 6, // 3: containerd.services.diff.v1.ApplyResponse.applied:type_name -> containerd.types.Descriptor + 7, // 4: containerd.services.diff.v1.DiffRequest.left:type_name -> containerd.types.Mount + 7, // 5: containerd.services.diff.v1.DiffRequest.right:type_name -> containerd.types.Mount + 5, // 6: containerd.services.diff.v1.DiffRequest.labels:type_name -> containerd.services.diff.v1.DiffRequest.LabelsEntry + 8, // 7: containerd.services.diff.v1.DiffRequest.source_date_epoch:type_name -> google.protobuf.Timestamp + 6, // 8: containerd.services.diff.v1.DiffResponse.diff:type_name -> containerd.types.Descriptor + 9, // 9: containerd.services.diff.v1.ApplyRequest.PayloadsEntry.value:type_name -> google.protobuf.Any + 0, // 10: containerd.services.diff.v1.Diff.Apply:input_type -> containerd.services.diff.v1.ApplyRequest + 2, // 11: containerd.services.diff.v1.Diff.Diff:input_type -> containerd.services.diff.v1.DiffRequest + 1, // 12: containerd.services.diff.v1.Diff.Apply:output_type -> containerd.services.diff.v1.ApplyResponse + 3, // 13: containerd.services.diff.v1.Diff.Diff:output_type -> containerd.services.diff.v1.DiffResponse + 12, // [12:14] is the sub-list for method output_type + 10, // [10:12] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_init() } +func file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_init() { + if File_github_com_containerd_containerd_api_services_diff_v1_diff_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: 
+ return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiffRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiffResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_diff_v1_diff_proto = out.File + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index cc24e3f2ca..7191a562e2 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -18,8 +18,8 @@ syntax = "proto3"; package containerd.services.diff.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -73,6 +73,10 @@ message DiffRequest { // Labels are the labels to apply to the generated content // on content store commit. map labels = 5; + + // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility. + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + google.protobuf.Timestamp source_date_epoch = 6; } message DiffResponse { diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go new file mode 100644 index 0000000000..daa3b1801a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto + +package diff + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// DiffClient is the client API for Diff service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DiffClient interface { + // Apply applies the content associated with the provided digests onto + // the provided mounts. Archive content will be extracted and + // decompressed if necessary. + Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) + // Diff creates a diff between the given mounts and uploads the result + // to the content store. + Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) +} + +type diffClient struct { + cc grpc.ClientConnInterface +} + +func NewDiffClient(cc grpc.ClientConnInterface) DiffClient { + return &diffClient{cc} +} + +func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { + out := new(ApplyResponse) + err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) { + out := new(DiffResponse) + err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DiffServer is the server API for Diff service. +// All implementations must embed UnimplementedDiffServer +// for forward compatibility +type DiffServer interface { + // Apply applies the content associated with the provided digests onto + // the provided mounts. Archive content will be extracted and + // decompressed if necessary. + Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) + // Diff creates a diff between the given mounts and uploads the result + // to the content store. + Diff(context.Context, *DiffRequest) (*DiffResponse, error) + mustEmbedUnimplementedDiffServer() +} + +// UnimplementedDiffServer must be embedded to have forward compatible implementations. +type UnimplementedDiffServer struct { +} + +func (UnimplementedDiffServer) Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") +} +func (UnimplementedDiffServer) Diff(context.Context, *DiffRequest) (*DiffResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Diff not implemented") +} +func (UnimplementedDiffServer) mustEmbedUnimplementedDiffServer() {} + +// UnsafeDiffServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DiffServer will +// result in compilation errors. 
+type UnsafeDiffServer interface { + mustEmbedUnimplementedDiffServer() +} + +func RegisterDiffServer(s grpc.ServiceRegistrar, srv DiffServer) { + s.RegisterService(&Diff_ServiceDesc, srv) +} + +func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiffServer).Apply(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.diff.v1.Diff/Apply", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiffRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiffServer).Diff(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.diff.v1.Diff/Diff", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiffServer).Diff(ctx, req.(*DiffRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Diff_ServiceDesc is the grpc.ServiceDesc for Diff service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Diff_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.diff.v1.Diff", + HandlerType: (*DiffServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Apply", + Handler: _Diff_Apply_Handler, + }, + { + MethodName: "Diff", + Handler: _Diff_Diff_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go index 4373f3bf2f..f083d9fce2 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go @@ -1,1372 +1,443 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/events/v1/events.proto package events import ( - context "context" - fmt "fmt" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type PublishRequest struct { - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` } -func (m *PublishRequest) Reset() { *m = PublishRequest{} } -func (*PublishRequest) ProtoMessage() {} -func (*PublishRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{0} -} -func (m *PublishRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PublishRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublishRequest.Merge(m, src) -} -func (m *PublishRequest) XXX_Size() int { - return m.Size() -} -func (m *PublishRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PublishRequest.DiscardUnknown(m) + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var 
xxx_messageInfo_PublishRequest proto.InternalMessageInfo +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{0} +} + +func (x *PublishRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest) GetEvent() *anypb.Any { + if x != nil { + return x.Event + } + return nil +} type ForwardRequest struct { - Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` } -func (m *ForwardRequest) Reset() { *m = ForwardRequest{} } -func (*ForwardRequest) ProtoMessage() {} -func (*ForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{1} -} -func (m *ForwardRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ForwardRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardRequest.Merge(m, src) -} -func (m *ForwardRequest) XXX_Size() int { - return m.Size() -} -func (m *ForwardRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardRequest.DiscardUnknown(m) + +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo +func (*ForwardRequest) ProtoMessage() {} + +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. 
+func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{1} +} + +func (x *ForwardRequest) GetEnvelope() *Envelope { + if x != nil { + return x.Envelope + } + return nil +} type SubscribeRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} } -func (*SubscribeRequest) ProtoMessage() {} -func (*SubscribeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{2} -} -func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SubscribeRequest) Reset() { + *x = SubscribeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SubscribeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubscribeRequest.Merge(m, src) -} -func (m *SubscribeRequest) XXX_Size() int { - return m.Size() -} -func (m *SubscribeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SubscribeRequest.DiscardUnknown(m) + +func (x *SubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo +func (*SubscribeRequest) ProtoMessage() {} + +func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead. 
+func (*SubscribeRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{2} +} + +func (x *SubscribeRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type Envelope struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Envelope) Reset() { *m = Envelope{} } func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. 
func (*Envelope) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{3} -} -func (m *Envelope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Envelope) XXX_Merge(src proto.Message) { - xxx_messageInfo_Envelope.Merge(m, src) -} -func (m *Envelope) XXX_Size() int { - return m.Size() -} -func (m *Envelope) XXX_DiscardUnknown() { - xxx_messageInfo_Envelope.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{3} } -var xxx_messageInfo_Envelope proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest") - proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.v1.ForwardRequest") - proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest") - proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptor_43fcd20dc1642376) -} - -var fileDescriptor_43fcd20dc1642376 = []byte{ - // 466 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30, - 0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a, - 0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb, - 0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0, - 0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf, - 0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2, - 0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a, - 0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62, - 0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2, - 0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e, - 0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad, - 0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51, - 0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee, - 0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf, - 0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b, - 0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8, - 0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96, - 0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e, - 0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42, - 0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14, - 0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 
0x16, 0xb6, 0x58, - 0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5, - 0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d, - 0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee, - 0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b, - 0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97, - 0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b, - 0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0, - 0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04, - 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *Envelope) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: timestamp - case "namespace": - return string(m.Namespace), len(m.Namespace) > 0 - case "topic": - return string(m.Topic), len(m.Topic) > 0 - case "event": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event) - if err != nil { - return "", false - } - - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false - } - return adaptor.Field(fieldpath[1:]) - } - return "", false -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// EventsClient is the client API for Events service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type EventsClient interface { - // Publish an event to a topic. - // - // The event will be packed into a timestamp envelope with the namespace - // introspected from the context. The envelope will then be dispatched. - Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Forward sends an event that has already been packaged into an envelope - // with a timestamp and namespace. - // - // This is useful if earlier timestamping is required or when forwarding on - // behalf of another component, namespace or publisher. - Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Subscribe to a stream of events, possibly returning only that match any - // of the provided filters. - // - // Unlike many other methods in containerd, subscribers will get messages - // from all namespaces unless otherwise specified. If this is not desired, - // a filter can be provided in the format 'namespace==' to - // restrict the received events. 
- Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) -} - -type eventsClient struct { - cc *grpc.ClientConn -} - -func NewEventsClient(cc *grpc.ClientConn) EventsClient { - return &eventsClient{cc} -} - -func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) { - stream, err := c.cc.NewStream(ctx, &_Events_serviceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...) - if err != nil { - return nil, err - } - x := &eventsSubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Events_SubscribeClient interface { - Recv() (*Envelope, error) - grpc.ClientStream -} - -type eventsSubscribeClient struct { - grpc.ClientStream -} - -func (x *eventsSubscribeClient) Recv() (*Envelope, error) { - m := new(Envelope) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// EventsServer is the server API for Events service. -type EventsServer interface { - // Publish an event to a topic. - // - // The event will be packed into a timestamp envelope with the namespace - // introspected from the context. The envelope will then be dispatched. - Publish(context.Context, *PublishRequest) (*types.Empty, error) - // Forward sends an event that has already been packaged into an envelope - // with a timestamp and namespace. - // - // This is useful if earlier timestamping is required or when forwarding on - // behalf of another component, namespace or publisher. - Forward(context.Context, *ForwardRequest) (*types.Empty, error) - // Subscribe to a stream of events, possibly returning only that match any - // of the provided filters. - // - // Unlike many other methods in containerd, subscribers will get messages - // from all namespaces unless otherwise specified. If this is not desired, - // a filter can be provided in the format 'namespace==' to - // restrict the received events. - Subscribe(*SubscribeRequest, Events_SubscribeServer) error -} - -// UnimplementedEventsServer can be embedded to have forward compatible implementations. 
-type UnimplementedEventsServer struct { -} - -func (*UnimplementedEventsServer) Publish(ctx context.Context, req *PublishRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") -} -func (*UnimplementedEventsServer) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented") -} -func (*UnimplementedEventsServer) Subscribe(req *SubscribeRequest, srv Events_SubscribeServer) error { - return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") -} - -func RegisterEventsServer(s *grpc.Server, srv EventsServer) { - s.RegisterService(&_Events_serviceDesc, srv) -} - -func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PublishRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EventsServer).Publish(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.events.v1.Events/Publish", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EventsServer).Publish(ctx, req.(*PublishRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ForwardRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EventsServer).Forward(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.events.v1.Events/Forward", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscribeRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream}) -} - -type Events_SubscribeServer interface { - Send(*Envelope) error - grpc.ServerStream -} - -type eventsSubscribeServer struct { - grpc.ServerStream -} - -func (x *eventsSubscribeServer) Send(m *Envelope) error { - return x.ServerStream.SendMsg(m) -} - -var _Events_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.events.v1.Events", - HandlerType: (*EventsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Publish", - Handler: _Events_Publish_Handler, - }, - { - MethodName: "Forward", - Handler: _Events_Forward_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Subscribe", - Handler: _Events_Subscribe_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto", -} - -func (m *PublishRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PublishRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i 
-= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ForwardRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Envelope != nil { - { - size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubscribeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Envelope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintEvents(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func 
encodeVarintEvents(dAtA []byte, offset int, v uint64) int { - offset -= sovEvents(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PublishRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ForwardRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Envelope != nil { - l = m.Envelope.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SubscribeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovEvents(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Envelope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvents(uint64(l)) - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEvents(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEvents(x uint64) (n int) { - return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PublishRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PublishRequest{`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ForwardRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ForwardRequest{`, - `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SubscribeRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubscribeRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Envelope) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Envelope{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringEvents(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} 
-func (m *PublishRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func (m *ForwardRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Envelope == nil { - m.Envelope = &Envelope{} - } - if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Envelope) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Envelope) GetEvent() *anypb.Any { + if x != nil { + return x.Event } return nil } -func (m *SubscribeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Envelope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Envelope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_services_events_v1_events_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEvents(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEvents - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEvents - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEvents - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, + 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x52, 
0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x32, 0x95, 0x02, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x50, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x2d, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x50, 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x12, 0x2f, 0x2e, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x30, 0x01, 0x42, 0x40, + 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData = file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes = []interface{}{ + (*PublishRequest)(nil), // 0: containerd.services.events.v1.PublishRequest + (*ForwardRequest)(nil), // 1: containerd.services.events.v1.ForwardRequest + (*SubscribeRequest)(nil), // 2: containerd.services.events.v1.SubscribeRequest + (*Envelope)(nil), // 3: containerd.services.events.v1.Envelope + (*anypb.Any)(nil), // 4: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs = []int32{ + 4, // 0: containerd.services.events.v1.PublishRequest.event:type_name -> google.protobuf.Any + 3, // 1: containerd.services.events.v1.ForwardRequest.envelope:type_name -> containerd.services.events.v1.Envelope + 5, // 2: containerd.services.events.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp + 4, // 3: containerd.services.events.v1.Envelope.event:type_name -> google.protobuf.Any + 0, // 4: containerd.services.events.v1.Events.Publish:input_type -> containerd.services.events.v1.PublishRequest + 1, // 5: containerd.services.events.v1.Events.Forward:input_type -> containerd.services.events.v1.ForwardRequest + 2, // 6: containerd.services.events.v1.Events.Subscribe:input_type -> 
containerd.services.events.v1.SubscribeRequest + 6, // 7: containerd.services.events.v1.Events.Publish:output_type -> google.protobuf.Empty + 6, // 8: containerd.services.events.v1.Events.Forward:output_type -> google.protobuf.Empty + 3, // 9: containerd.services.events.v1.Events.Subscribe:output_type -> containerd.services.events.v1.Envelope + 7, // [7:10] is the sub-list for method output_type + 4, // [4:7] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_events_v1_events_proto_init() } +func file_github_com_containerd_containerd_api_services_events_v1_events_proto_init() { + if File_github_com_containerd_containerd_api_services_events_v1_events_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_events_v1_events_proto = out.File + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto index 9a2444e54c..3e0f11ffb8 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package 
containerd.services.events.v1; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -import weak "gogoproto/gogo.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -65,7 +64,7 @@ message SubscribeRequest { message Envelope { option (containerd.plugin.fieldpath) = true; - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go new file mode 100644 index 0000000000..768306555c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go @@ -0,0 +1,238 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/events/v1/events.proto + +package events + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// EventsClient is the client API for Events service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EventsClient interface { + // Publish an event to a topic. + // + // The event will be packed into a timestamp envelope with the namespace + // introspected from the context. The envelope will then be dispatched. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Forward sends an event that has already been packaged into an envelope + // with a timestamp and namespace. + // + // This is useful if earlier timestamping is required or when forwarding on + // behalf of another component, namespace or publisher. + Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Subscribe to a stream of events, possibly returning only that match any + // of the provided filters. + // + // Unlike many other methods in containerd, subscribers will get messages + // from all namespaces unless otherwise specified. If this is not desired, + // a filter can be provided in the format 'namespace==' to + // restrict the received events. + Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) +} + +type eventsClient struct { + cc grpc.ClientConnInterface +} + +func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { + return &eventsClient{cc} +} + +func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &Events_ServiceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...) + if err != nil { + return nil, err + } + x := &eventsSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Events_SubscribeClient interface { + Recv() (*Envelope, error) + grpc.ClientStream +} + +type eventsSubscribeClient struct { + grpc.ClientStream +} + +func (x *eventsSubscribeClient) Recv() (*Envelope, error) { + m := new(Envelope) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// EventsServer is the server API for Events service. +// All implementations must embed UnimplementedEventsServer +// for forward compatibility +type EventsServer interface { + // Publish an event to a topic. + // + // The event will be packed into a timestamp envelope with the namespace + // introspected from the context. The envelope will then be dispatched. + Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) + // Forward sends an event that has already been packaged into an envelope + // with a timestamp and namespace. + // + // This is useful if earlier timestamping is required or when forwarding on + // behalf of another component, namespace or publisher. + Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) + // Subscribe to a stream of events, possibly returning only that match any + // of the provided filters. + // + // Unlike many other methods in containerd, subscribers will get messages + // from all namespaces unless otherwise specified. If this is not desired, + // a filter can be provided in the format 'namespace==' to + // restrict the received events. + Subscribe(*SubscribeRequest, Events_SubscribeServer) error + mustEmbedUnimplementedEventsServer() +} + +// UnimplementedEventsServer must be embedded to have forward compatible implementations. +type UnimplementedEventsServer struct { +} + +func (UnimplementedEventsServer) Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedEventsServer) Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented") +} +func (UnimplementedEventsServer) Subscribe(*SubscribeRequest, Events_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedEventsServer) mustEmbedUnimplementedEventsServer() {} + +// UnsafeEventsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EventsServer will +// result in compilation errors. 
+type UnsafeEventsServer interface { + mustEmbedUnimplementedEventsServer() +} + +func RegisterEventsServer(s grpc.ServiceRegistrar, srv EventsServer) { + s.RegisterService(&Events_ServiceDesc, srv) +} + +func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.events.v1.Events/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForwardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).Forward(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.events.v1.Events/Forward", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream}) +} + +type Events_SubscribeServer interface { + Send(*Envelope) error + grpc.ServerStream +} + +type eventsSubscribeServer struct { + grpc.ServerStream +} + +func (x *eventsSubscribeServer) Send(m *Envelope) error { + return x.ServerStream.SendMsg(m) +} + +// Events_ServiceDesc is the grpc.ServiceDesc for Events service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Events_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.events.v1.Events", + HandlerType: (*EventsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _Events_Publish_Handler, + }, + { + MethodName: "Forward", + Handler: _Events_Forward_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _Events_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go index ee170f2c9d..52aff3dd2f 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go @@ -1,40 +1,49 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/images/v1/images.proto package images import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Image struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Name provides a unique name for the image. // // Containerd treats this as the primary identifier. @@ -46,289 +55,396 @@ type Image struct { // The combined size of a key/value pair cannot exceed 4096 bytes. Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Target describes the content entry point of the image. - Target types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3" json:"target"` + Target *types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // CreatedAt is the time the image was first created. - CreatedAt time.Time `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt is the last time the image was mutated. 
- UpdatedAt time.Time `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` } -func (m *Image) Reset() { *m = Image{} } -func (*Image) ProtoMessage() {} -func (*Image) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{0} -} -func (m *Image) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Image.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Image) Reset() { + *x = Image{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Image) XXX_Merge(src proto.Message) { - xxx_messageInfo_Image.Merge(m, src) -} -func (m *Image) XXX_Size() int { - return m.Size() -} -func (m *Image) XXX_DiscardUnknown() { - xxx_messageInfo_Image.DiscardUnknown(m) + +func (x *Image) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Image proto.InternalMessageInfo +func (*Image) ProtoMessage() {} + +func (x *Image) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Image.ProtoReflect.Descriptor instead. 
+func (*Image) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{0} +} + +func (x *Image) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Image) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Image) GetTarget() *types.Descriptor { + if x != nil { + return x.Target + } + return nil +} + +func (x *Image) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Image) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} type GetImageRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *GetImageRequest) Reset() { *m = GetImageRequest{} } -func (*GetImageRequest) ProtoMessage() {} -func (*GetImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{1} -} -func (m *GetImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetImageRequest) Reset() { + *x = GetImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetImageRequest.Merge(m, src) -} -func (m *GetImageRequest) XXX_Size() int { - return m.Size() -} -func (m *GetImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetImageRequest.DiscardUnknown(m) + +func (x *GetImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetImageRequest proto.InternalMessageInfo +func (*GetImageRequest) ProtoMessage() {} + +func (x *GetImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetImageRequest.ProtoReflect.Descriptor instead. 
+func (*GetImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{1} +} + +func (x *GetImageRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} type GetImageResponse struct { - Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func (m *GetImageResponse) Reset() { *m = GetImageResponse{} } -func (*GetImageResponse) ProtoMessage() {} -func (*GetImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{2} -} -func (m *GetImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetImageResponse) Reset() { + *x = GetImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetImageResponse.Merge(m, src) -} -func (m *GetImageResponse) XXX_Size() int { - return m.Size() -} -func (m *GetImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetImageResponse.DiscardUnknown(m) + +func (x *GetImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetImageResponse proto.InternalMessageInfo +func (*GetImageResponse) ProtoMessage() {} + +func (x *GetImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetImageResponse.ProtoReflect.Descriptor instead. 
+func (*GetImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{2} +} + +func (x *GetImageResponse) GetImage() *Image { + if x != nil { + return x.Image + } + return nil +} type CreateImageRequest struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (m *CreateImageRequest) Reset() { *m = CreateImageRequest{} } -func (*CreateImageRequest) ProtoMessage() {} -func (*CreateImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{3} -} -func (m *CreateImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateImageRequest) Reset() { + *x = CreateImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateImageRequest.Merge(m, src) -} -func (m *CreateImageRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateImageRequest.DiscardUnknown(m) + +func (x *CreateImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateImageRequest proto.InternalMessageInfo +func (*CreateImageRequest) ProtoMessage() {} + +func (x *CreateImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateImageRequest.ProtoReflect.Descriptor instead. 
+func (*CreateImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateImageRequest) GetImage() *Image { + if x != nil { + return x.Image + } + return nil +} + +func (x *CreateImageRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch + } + return nil +} type CreateImageResponse struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func (m *CreateImageResponse) Reset() { *m = CreateImageResponse{} } -func (*CreateImageResponse) ProtoMessage() {} -func (*CreateImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{4} -} -func (m *CreateImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateImageResponse) Reset() { + *x = CreateImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateImageResponse.Merge(m, src) -} -func (m *CreateImageResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateImageResponse.DiscardUnknown(m) + +func (x *CreateImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateImageResponse proto.InternalMessageInfo +func (*CreateImageResponse) ProtoMessage() {} + +func (x *CreateImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateImageResponse.ProtoReflect.Descriptor instead. +func (*CreateImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateImageResponse) GetImage() *Image { + if x != nil { + return x.Image + } + return nil +} type UpdateImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Image provides a full or partial image for update. // // The name field must be set or an error will be returned. - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. 
- UpdateMask *types1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (m *UpdateImageRequest) Reset() { *m = UpdateImageRequest{} } -func (*UpdateImageRequest) ProtoMessage() {} -func (*UpdateImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{5} -} -func (m *UpdateImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateImageRequest) Reset() { + *x = UpdateImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateImageRequest.Merge(m, src) -} -func (m *UpdateImageRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateImageRequest.DiscardUnknown(m) + +func (x *UpdateImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateImageRequest proto.InternalMessageInfo +func (*UpdateImageRequest) ProtoMessage() {} + +func (x *UpdateImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateImageRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{5} +} + +func (x *UpdateImageRequest) GetImage() *Image { + if x != nil { + return x.Image + } + return nil +} + +func (x *UpdateImageRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateImageRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch + } + return nil +} type UpdateImageResponse struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func (m *UpdateImageResponse) Reset() { *m = UpdateImageResponse{} } -func (*UpdateImageResponse) ProtoMessage() {} -func (*UpdateImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{6} -} -func (m *UpdateImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateImageResponse) Reset() { + *x = UpdateImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateImageResponse.Merge(m, src) -} -func (m *UpdateImageResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateImageResponse.DiscardUnknown(m) + +func (x *UpdateImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateImageResponse proto.InternalMessageInfo +func (*UpdateImageResponse) ProtoMessage() {} + +func (x *UpdateImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateImageResponse.ProtoReflect.Descriptor instead. +func (*UpdateImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateImageResponse) GetImage() *Image { + if x != nil { + return x.Image + } + return nil +} type ListImagesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -339,2400 +455,492 @@ type ListImagesRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
- Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } -func (*ListImagesRequest) ProtoMessage() {} -func (*ListImagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{7} -} -func (m *ListImagesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListImagesRequest) Reset() { + *x = ListImagesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListImagesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListImagesRequest.Merge(m, src) -} -func (m *ListImagesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListImagesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListImagesRequest.DiscardUnknown(m) + +func (x *ListImagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListImagesRequest proto.InternalMessageInfo +func (*ListImagesRequest) ProtoMessage() {} + +func (x *ListImagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImagesRequest.ProtoReflect.Descriptor instead. 
+func (*ListImagesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{7} +} + +func (x *ListImagesRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListImagesResponse struct { - Images []Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Images []*Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` } -func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } -func (*ListImagesResponse) ProtoMessage() {} -func (*ListImagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{8} -} -func (m *ListImagesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListImagesResponse) Reset() { + *x = ListImagesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListImagesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListImagesResponse.Merge(m, src) -} -func (m *ListImagesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListImagesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListImagesResponse.DiscardUnknown(m) + +func (x *ListImagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListImagesResponse proto.InternalMessageInfo +func (*ListImagesResponse) ProtoMessage() {} + +func (x *ListImagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImagesResponse.ProtoReflect.Descriptor instead. 
+func (*ListImagesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{8} +} + +func (x *ListImagesResponse) GetImages() []*Image { + if x != nil { + return x.Images + } + return nil +} type DeleteImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Sync indicates that the delete and cleanup should be done // synchronously before returning to the caller // // Default is false - Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` +} + +func (x *DeleteImageRequest) Reset() { + *x = DeleteImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteImageRequest) Reset() { *m = DeleteImageRequest{} } func (*DeleteImageRequest) ProtoMessage() {} + +func (x *DeleteImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteImageRequest.ProtoReflect.Descriptor instead. 
func (*DeleteImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{9} -} -func (m *DeleteImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteImageRequest.Merge(m, src) -} -func (m *DeleteImageRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteImageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteImageRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Image)(nil), "containerd.services.images.v1.Image") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.Image.LabelsEntry") - proto.RegisterType((*GetImageRequest)(nil), "containerd.services.images.v1.GetImageRequest") - proto.RegisterType((*GetImageResponse)(nil), "containerd.services.images.v1.GetImageResponse") - proto.RegisterType((*CreateImageRequest)(nil), "containerd.services.images.v1.CreateImageRequest") - proto.RegisterType((*CreateImageResponse)(nil), "containerd.services.images.v1.CreateImageResponse") - proto.RegisterType((*UpdateImageRequest)(nil), "containerd.services.images.v1.UpdateImageRequest") - proto.RegisterType((*UpdateImageResponse)(nil), "containerd.services.images.v1.UpdateImageResponse") - proto.RegisterType((*ListImagesRequest)(nil), "containerd.services.images.v1.ListImagesRequest") - proto.RegisterType((*ListImagesResponse)(nil), "containerd.services.images.v1.ListImagesResponse") - proto.RegisterType((*DeleteImageRequest)(nil), "containerd.services.images.v1.DeleteImageRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/images/v1/images.proto", fileDescriptor_8666fa071128ae5f) -} - -var fileDescriptor_8666fa071128ae5f = []byte{ - // 659 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5, - 0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52, - 0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5, - 0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde, - 0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36, - 0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31, - 0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48, - 0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91, - 0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b, - 0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78, - 0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69, - 0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c, - 0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 
0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b, - 0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8, - 0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d, - 0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e, - 0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91, - 0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa, - 0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d, - 0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52, - 0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17, - 0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05, - 0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75, - 0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52, - 0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf, - 0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72, - 0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb, - 0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde, - 0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84, - 0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06, - 0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9, - 0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6, - 0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d, - 0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73, - 0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84, - 0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05, - 0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab, - 0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a, - 0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47, - 0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda, - 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ImagesClient is the client API for Images service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ImagesClient interface { - // Get returns an image by name. - Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) - // List returns a list of all images known to containerd. 
- List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) - // Create an image record in the metadata store. - // - // The name of the image must be unique. - Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) - // Update assigns the name to a given target image based on the provided - // image. - Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) - // Delete deletes the image by name. - Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error) -} - -type imagesClient struct { - cc *grpc.ClientConn -} - -func NewImagesClient(cc *grpc.ClientConn) ImagesClient { - return &imagesClient{cc} -} - -func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) { - out := new(GetImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { - out := new(ListImagesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) { - out := new(CreateImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) { - out := new(UpdateImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ImagesServer is the server API for Images service. -type ImagesServer interface { - // Get returns an image by name. - Get(context.Context, *GetImageRequest) (*GetImageResponse, error) - // List returns a list of all images known to containerd. - List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) - // Create an image record in the metadata store. - // - // The name of the image must be unique. - Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) - // Update assigns the name to a given target image based on the provided - // image. - Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) - // Delete deletes the image by name. - Delete(context.Context, *DeleteImageRequest) (*types1.Empty, error) -} - -// UnimplementedImagesServer can be embedded to have forward compatible implementations. 
-type UnimplementedImagesServer struct { -} - -func (*UnimplementedImagesServer) Get(ctx context.Context, req *GetImageRequest) (*GetImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedImagesServer) List(ctx context.Context, req *ListImagesRequest) (*ListImagesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedImagesServer) Create(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedImagesServer) Update(ctx context.Context, req *UpdateImageRequest) (*UpdateImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedImagesServer) Delete(ctx context.Context, req *DeleteImageRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} - -func RegisterImagesServer(s *grpc.Server, srv ImagesServer) { - s.RegisterService(&_Images_serviceDesc, srv) -} - -func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Get(ctx, req.(*GetImageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListImagesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).List(ctx, req.(*ListImagesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Images_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Create(ctx, req.(*CreateImageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Images_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(ImagesServer).Update(ctx, req.(*UpdateImageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Delete(ctx, req.(*DeleteImageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Images_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.images.v1.Images", - HandlerType: (*ImagesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Images_Get_Handler, - }, - { - MethodName: "List", - Handler: _Images_List_Handler, - }, - { - MethodName: "Create", - Handler: _Images_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Images_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Images_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/images/v1/images.proto", -} - -func (m *Image) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Image) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintImages(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x42 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintImages(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x3a - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImages(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImages(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImages(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Image != nil { - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CreateImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := 
m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListImagesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListImagesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintImages(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListImagesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListImagesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Images) > 0 { - for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DeleteImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if m.Sync { - i-- - if m.Sync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintImages(dAtA []byte, offset int, v uint64) int { - offset -= sovImages(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Image) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImages(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImages(uint64(len(k))) + 1 + len(v) + sovImages(uint64(len(v))) - n += mapEntrySize + 1 + sovImages(uint64(mapEntrySize)) - } - } - l = m.Target.Size() - n += 1 + l + sovImages(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovImages(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Image != nil { - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListImagesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovImages(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListImagesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovImages(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l 
> 0 { - n += 1 + l + sovImages(uint64(l)) - } - if m.Sync { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovImages(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozImages(x uint64) (n int) { - return sovImages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Image) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Image{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `Target:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Target), "Descriptor", "types.Descriptor", 1), `&`, ``, 1) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetImageRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetImageResponse{`, - `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateImageRequest{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateImageResponse{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateImageRequest{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateImageResponse{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return 
s -} -func (this *ListImagesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListImagesRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListImagesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForImages := "[]Image{" - for _, f := range this.Images { - repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," - } - repeatedStringForImages += "}" - s := strings.Join([]string{`&ListImagesResponse{`, - `Images:` + repeatedStringForImages + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteImageRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Sync:` + fmt.Sprintf("%v", this.Sync) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringImages(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Image) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Image: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImages - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImages - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen 
|= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Image == nil { - m.Image = &Image{} - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types1.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListImagesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListImagesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListImagesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListImagesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListImagesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListImagesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, Image{}) - if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipImages(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthImages - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupImages - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthImages - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteImageRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteImageRequest) GetSync() bool { + if x != nil { + return x.Sync + } + return false +} + +var File_github_com_containerd_containerd_api_services_images_v1_images_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 
0x22, 0xcc, 0x02, 0x0a, 0x05, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x25, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3a, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x65, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x22, 0x51, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x46, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x51, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x22, 0x2d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x52, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x06, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79, + 0x6e, 0x63, 0x32, 0x94, 0x04, 0x0a, 0x06, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x12, 0x66, 0x0a, + 0x03, 0x47, 0x65, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x31, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 
0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowImages = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupImages = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData = file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes = []interface{}{ + (*Image)(nil), // 0: containerd.services.images.v1.Image + (*GetImageRequest)(nil), // 1: containerd.services.images.v1.GetImageRequest + (*GetImageResponse)(nil), // 2: containerd.services.images.v1.GetImageResponse + (*CreateImageRequest)(nil), // 3: containerd.services.images.v1.CreateImageRequest + (*CreateImageResponse)(nil), // 4: containerd.services.images.v1.CreateImageResponse + (*UpdateImageRequest)(nil), // 5: containerd.services.images.v1.UpdateImageRequest + (*UpdateImageResponse)(nil), // 6: containerd.services.images.v1.UpdateImageResponse + (*ListImagesRequest)(nil), // 7: containerd.services.images.v1.ListImagesRequest + (*ListImagesResponse)(nil), // 8: containerd.services.images.v1.ListImagesResponse + (*DeleteImageRequest)(nil), // 9: containerd.services.images.v1.DeleteImageRequest + nil, // 10: containerd.services.images.v1.Image.LabelsEntry + (*types.Descriptor)(nil), // 11: containerd.types.Descriptor + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs = []int32{ + 10, // 0: containerd.services.images.v1.Image.labels:type_name -> containerd.services.images.v1.Image.LabelsEntry + 11, // 1: containerd.services.images.v1.Image.target:type_name -> containerd.types.Descriptor + 12, // 2: containerd.services.images.v1.Image.created_at:type_name -> google.protobuf.Timestamp + 12, // 3: containerd.services.images.v1.Image.updated_at:type_name -> google.protobuf.Timestamp + 0, // 4: containerd.services.images.v1.GetImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 5: containerd.services.images.v1.CreateImageRequest.image:type_name -> containerd.services.images.v1.Image + 12, // 6: containerd.services.images.v1.CreateImageRequest.source_date_epoch:type_name -> 
google.protobuf.Timestamp + 0, // 7: containerd.services.images.v1.CreateImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 8: containerd.services.images.v1.UpdateImageRequest.image:type_name -> containerd.services.images.v1.Image + 13, // 9: containerd.services.images.v1.UpdateImageRequest.update_mask:type_name -> google.protobuf.FieldMask + 12, // 10: containerd.services.images.v1.UpdateImageRequest.source_date_epoch:type_name -> google.protobuf.Timestamp + 0, // 11: containerd.services.images.v1.UpdateImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 12: containerd.services.images.v1.ListImagesResponse.images:type_name -> containerd.services.images.v1.Image + 1, // 13: containerd.services.images.v1.Images.Get:input_type -> containerd.services.images.v1.GetImageRequest + 7, // 14: containerd.services.images.v1.Images.List:input_type -> containerd.services.images.v1.ListImagesRequest + 3, // 15: containerd.services.images.v1.Images.Create:input_type -> containerd.services.images.v1.CreateImageRequest + 5, // 16: containerd.services.images.v1.Images.Update:input_type -> containerd.services.images.v1.UpdateImageRequest + 9, // 17: containerd.services.images.v1.Images.Delete:input_type -> containerd.services.images.v1.DeleteImageRequest + 2, // 18: containerd.services.images.v1.Images.Get:output_type -> containerd.services.images.v1.GetImageResponse + 8, // 19: containerd.services.images.v1.Images.List:output_type -> containerd.services.images.v1.ListImagesResponse + 4, // 20: containerd.services.images.v1.Images.Create:output_type -> containerd.services.images.v1.CreateImageResponse + 6, // 21: containerd.services.images.v1.Images.Update:output_type -> containerd.services.images.v1.UpdateImageResponse + 14, // 22: containerd.services.images.v1.Images.Delete:output_type -> google.protobuf.Empty + 18, // [18:23] is the sub-list for method output_type + 13, // [13:18] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_images_v1_images_proto_init() } +func file_github_com_containerd_containerd_api_services_images_v1_images_proto_init() { + if File_github_com_containerd_containerd_api_services_images_v1_images_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Image); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetImageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetImageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*CreateImageRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*CreateImageResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UpdateImageRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UpdateImageResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListImagesRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListImagesResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DeleteImageRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   11,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes,
+		DependencyIndexes: file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs,
+		MessageInfos:      file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes,
+	}.Build()
+	File_github_com_containerd_containerd_api_services_images_v1_images_proto = out.File
+	file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc = nil
+	file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes = nil
+	file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
index dee4503e27..b32df4b051 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
@@ -18,7 +18,6 @@ syntax = "proto3";
 
 package containerd.services.images.v1;
 
-import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
@@ -71,13 +70,13 @@ message Image {
 	map<string, string> labels = 2;
 
 	// Target describes the content entry point of the image.
-	containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];
+	containerd.types.Descriptor target = 3;
 
 	// CreatedAt is the time the image was first created.
-	google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp created_at = 7;
 
 	// UpdatedAt is the last time the image was mutated.
-	google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp updated_at = 8;
 }
 
 message GetImageRequest {
@@ -89,26 +88,30 @@ message GetImageResponse {
 }
 
 message CreateImageRequest {
-	Image image = 1 [(gogoproto.nullable) = false];
+	Image image = 1;
+
+	google.protobuf.Timestamp source_date_epoch = 2;
 }
 
 message CreateImageResponse {
-	Image image = 1 [(gogoproto.nullable) = false];
+	Image image = 1;
 }
 
 message UpdateImageRequest {
 	// Image provides a full or partial image for update.
 	//
 	// The name field must be set or an error will be returned.
-	Image image = 1 [(gogoproto.nullable) = false];
+	Image image = 1;
 
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
 	google.protobuf.FieldMask update_mask = 2;
+
+	google.protobuf.Timestamp source_date_epoch = 3;
 }
 
 message UpdateImageResponse {
-	Image image = 1 [(gogoproto.nullable) = false];
+	Image image = 1;
 }
 
 message ListImagesRequest {
@@ -126,7 +129,7 @@
 }
 
 message ListImagesResponse {
-	repeated Image images = 1 [(gogoproto.nullable) = false];
+	repeated Image images = 1;
 }
 
 message DeleteImageRequest {
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go
new file mode 100644
index 0000000000..86a4602e03
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go
@@ -0,0 +1,266 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc             v3.20.1
+// source: github.com/containerd/containerd/api/services/images/v1/images.proto
+
+package images
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// ImagesClient is the client API for Images service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ImagesClient interface {
+	// Get returns an image by name.
+	Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error)
+	// List returns a list of all images known to containerd.
+	List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error)
+	// Create an image record in the metadata store.
+	//
+	// The name of the image must be unique.
+ Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) + // Update assigns the name to a given target image based on the provided + // image. + Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) + // Delete deletes the image by name. + Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type imagesClient struct { + cc grpc.ClientConnInterface +} + +func NewImagesClient(cc grpc.ClientConnInterface) ImagesClient { + return &imagesClient{cc} +} + +func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) { + out := new(GetImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { + out := new(ListImagesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) { + out := new(CreateImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) { + out := new(UpdateImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ImagesServer is the server API for Images service. +// All implementations must embed UnimplementedImagesServer +// for forward compatibility +type ImagesServer interface { + // Get returns an image by name. + Get(context.Context, *GetImageRequest) (*GetImageResponse, error) + // List returns a list of all images known to containerd. + List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) + // Create an image record in the metadata store. + // + // The name of the image must be unique. + Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) + // Update assigns the name to a given target image based on the provided + // image. + Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) + // Delete deletes the image by name. + Delete(context.Context, *DeleteImageRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedImagesServer() +} + +// UnimplementedImagesServer must be embedded to have forward compatible implementations. 
+type UnimplementedImagesServer struct { +} + +func (UnimplementedImagesServer) Get(context.Context, *GetImageRequest) (*GetImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedImagesServer) List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedImagesServer) Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedImagesServer) Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedImagesServer) Delete(context.Context, *DeleteImageRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedImagesServer) mustEmbedUnimplementedImagesServer() {} + +// UnsafeImagesServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ImagesServer will +// result in compilation errors. +type UnsafeImagesServer interface { + mustEmbedUnimplementedImagesServer() +} + +func RegisterImagesServer(s grpc.ServiceRegistrar, srv ImagesServer) { + s.RegisterService(&Images_ServiceDesc, srv) +} + +func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Get(ctx, req.(*GetImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).List(ctx, req.(*ListImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Create(ctx, req.(*CreateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Update(ctx, req.(*UpdateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Delete(ctx, req.(*DeleteImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Images_ServiceDesc is the grpc.ServiceDesc for Images service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Images_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.images.v1.Images", + HandlerType: (*ImagesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Images_Get_Handler, + }, + { + MethodName: "List", + Handler: _Images_List_Handler, + }, + { + MethodName: "Create", + Handler: _Images_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Images_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Images_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/images/v1/images.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 65e015d4cd..aeef7c3a02 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -1,38 +1,48 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto package introspection import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - rpc "github.com/gogo/googleapis/google/rpc" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Plugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Type defines the type of plugin. // // See package plugin for a list of possible values. Non core plugins may @@ -51,7 +61,7 @@ type Plugin struct { // // If the plugin prefers certain platforms over others, they should be // listed from most to least preferred. - Platforms []types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms"` + Platforms []*types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms,omitempty"` // Exports allows plugins to provide values about state or configuration to // interested parties. // @@ -69,45 +79,95 @@ type Plugin struct { // was encountered during initialization. // // Plugins that have this value set cannot be used. 
- InitErr *rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + InitErr *status.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"` } -func (m *Plugin) Reset() { *m = Plugin{} } -func (*Plugin) ProtoMessage() {} -func (*Plugin) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{0} -} -func (m *Plugin) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Plugin.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Plugin) Reset() { + *x = Plugin{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Plugin) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plugin.Merge(m, src) -} -func (m *Plugin) XXX_Size() int { - return m.Size() -} -func (m *Plugin) XXX_DiscardUnknown() { - xxx_messageInfo_Plugin.DiscardUnknown(m) + +func (x *Plugin) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Plugin proto.InternalMessageInfo +func (*Plugin) ProtoMessage() {} + +func (x *Plugin) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Plugin.ProtoReflect.Descriptor instead. +func (*Plugin) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{0} +} + +func (x *Plugin) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Plugin) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Plugin) GetRequires() []string { + if x != nil { + return x.Requires + } + return nil +} + +func (x *Plugin) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms + } + return nil +} + +func (x *Plugin) GetExports() map[string]string { + if x != nil { + return x.Exports + } + return nil +} + +func (x *Plugin) GetCapabilities() []string { + if x != nil { + return x.Capabilities + } + return nil +} + +func (x *Plugin) GetInitErr() *status.Status { + if x != nil { + return x.InitErr + } + return nil +} type PluginsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -118,1419 +178,346 @@ type PluginsRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
- Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *PluginsRequest) Reset() { *m = PluginsRequest{} } -func (*PluginsRequest) ProtoMessage() {} -func (*PluginsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{1} -} -func (m *PluginsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PluginsRequest) Reset() { + *x = PluginsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PluginsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginsRequest.Merge(m, src) -} -func (m *PluginsRequest) XXX_Size() int { - return m.Size() -} -func (m *PluginsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PluginsRequest.DiscardUnknown(m) + +func (x *PluginsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PluginsRequest proto.InternalMessageInfo +func (*PluginsRequest) ProtoMessage() {} + +func (x *PluginsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PluginsRequest.ProtoReflect.Descriptor instead. 
+func (*PluginsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{1} +} + +func (x *PluginsRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type PluginsResponse struct { - Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Plugins []*Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins,omitempty"` } -func (m *PluginsResponse) Reset() { *m = PluginsResponse{} } -func (*PluginsResponse) ProtoMessage() {} -func (*PluginsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{2} -} -func (m *PluginsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PluginsResponse) Reset() { + *x = PluginsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PluginsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginsResponse.Merge(m, src) -} -func (m *PluginsResponse) XXX_Size() int { - return m.Size() -} -func (m *PluginsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PluginsResponse.DiscardUnknown(m) + +func (x *PluginsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo +func (*PluginsResponse) ProtoMessage() {} + +func (x *PluginsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PluginsResponse.ProtoReflect.Descriptor instead. 
+func (*PluginsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{2} +} + +func (x *PluginsResponse) GetPlugins() []*Plugin { + if x != nil { + return x.Plugins + } + return nil +} type ServerResponse struct { - UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Pid uint64 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + Pidns uint64 `protobuf:"varint,3,opt,name=pidns,proto3" json:"pidns,omitempty"` // PID namespace, such as 4026531836 +} + +func (x *ServerResponse) Reset() { + *x = ServerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ServerResponse) Reset() { *m = ServerResponse{} } func (*ServerResponse) ProtoMessage() {} + +func (x *ServerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerResponse.ProtoReflect.Descriptor instead. 
func (*ServerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{3} -} -func (m *ServerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerResponse.Merge(m, src) -} -func (m *ServerResponse) XXX_Size() int { - return m.Size() -} -func (m *ServerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ServerResponse.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{3} } -var xxx_messageInfo_ServerResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") - proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") - proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") - proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") +func (x *ServerResponse) GetUUID() string { + if x != nil { + return x.UUID + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptor_1a14fda866f10715) +func (x *ServerResponse) GetPid() uint64 { + if x != nil { + return x.Pid + } + return 0 } -var fileDescriptor_1a14fda866f10715 = []byte{ - // 549 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, - 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, - 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, - 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79, - 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, - 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, - 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, - 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, - 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, - 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, - 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, - 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, - 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, - 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, - 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, - 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 
0xf5, 0xc5, 0xb2, 0xb5, - 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, - 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, - 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, - 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, - 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, - 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, - 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, - 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, - 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, - 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, - 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, - 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, - 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, - 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, - 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, - 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, - 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, - 0xdc, 0x89, 0x04, 0x00, 0x00, +func (x *ServerResponse) GetPidns() uint64 { + if x != nil { + return x.Pidns + } + return 0 } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +var File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto protoreflect.FileDescriptor -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IntrospectionClient is the client API for Introspection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IntrospectionClient interface { - // Plugins returns a list of plugins in containerd. - // - // Clients can use this to detect features and capabilities when using - // containerd. - Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) - // Server returns information about the containerd server - Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) -} - -type introspectionClient struct { - cc *grpc.ClientConn -} - -func NewIntrospectionClient(cc *grpc.ClientConn) IntrospectionClient { - return &introspectionClient{cc} -} - -func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) { - out := new(PluginsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { - out := new(ServerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IntrospectionServer is the server API for Introspection service. -type IntrospectionServer interface { - // Plugins returns a list of plugins in containerd. - // - // Clients can use this to detect features and capabilities when using - // containerd. - Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) - // Server returns information about the containerd server - Server(context.Context, *types1.Empty) (*ServerResponse, error) -} - -// UnimplementedIntrospectionServer can be embedded to have forward compatible implementations. -type UnimplementedIntrospectionServer struct { -} - -func (*UnimplementedIntrospectionServer) Plugins(ctx context.Context, req *PluginsRequest) (*PluginsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Plugins not implemented") -} -func (*UnimplementedIntrospectionServer) Server(ctx context.Context, req *types1.Empty) (*ServerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Server not implemented") -} - -func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { - s.RegisterService(&_Introspection_serviceDesc, srv) -} - -func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PluginsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IntrospectionServer).Plugins(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.introspection.v1.Introspection/Plugins", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IntrospectionServer).Plugins(ctx, req.(*PluginsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(types1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IntrospectionServer).Server(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.introspection.v1.Introspection/Server", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Introspection_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.introspection.v1.Introspection", - HandlerType: (*IntrospectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Plugins", - Handler: _Introspection_Plugins_Handler, - }, - { - MethodName: "Server", - Handler: _Introspection_Server_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", -} - -func (m *Plugin) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *Plugin) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Plugin) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.InitErr != nil { - { - size, err := m.InitErr.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if len(m.Capabilities) > 0 { - for iNdEx := len(m.Capabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Capabilities[iNdEx]) - copy(dAtA[i:], m.Capabilities[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Capabilities[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Exports) > 0 { - for k := range m.Exports { - v := m.Exports[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintIntrospection(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintIntrospection(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintIntrospection(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Platforms) > 0 { - for iNdEx := len(m.Platforms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Platforms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Requires) > 0 { - for iNdEx := len(m.Requires) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Requires[iNdEx]) - copy(dAtA[i:], m.Requires[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Requires[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PluginsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= 
len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Plugins) > 0 { - for iNdEx := len(m.Plugins) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Plugins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ServerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.UUID) > 0 { - i -= len(m.UUID) - copy(dAtA[i:], m.UUID) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { - offset -= sovIntrospection(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Plugin) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if len(m.Requires) > 0 { - for _, s := range m.Requires { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if len(m.Platforms) > 0 { - for _, e := range m.Platforms { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if len(m.Exports) > 0 { - for k, v := range m.Exports { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovIntrospection(uint64(len(k))) + 1 + len(v) + sovIntrospection(uint64(len(v))) - n += mapEntrySize + 1 + sovIntrospection(uint64(mapEntrySize)) - } - } - if len(m.Capabilities) > 0 { - for _, s := range m.Capabilities { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.InitErr != nil { - l = m.InitErr.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PluginsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PluginsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Plugins) > 0 { - for _, e := range m.Plugins { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ServerResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UUID) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovIntrospection(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozIntrospection(x uint64) (n int) { - return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Plugin) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForPlatforms := "[]Platform{" - for _, f := range this.Platforms { - repeatedStringForPlatforms += fmt.Sprintf("%v", f) + "," - } - repeatedStringForPlatforms += "}" - keysForExports := make([]string, 0, len(this.Exports)) - for k, _ := range this.Exports { - keysForExports = append(keysForExports, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExports) - mapStringForExports := "map[string]string{" - for _, k := range keysForExports { - mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k]) - } - mapStringForExports += "}" - s := strings.Join([]string{`&Plugin{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Requires:` + fmt.Sprintf("%v", this.Requires) + `,`, - `Platforms:` + repeatedStringForPlatforms + `,`, - `Exports:` + mapStringForExports + `,`, - `Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`, - `InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PluginsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PluginsRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PluginsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForPlugins := "[]Plugin{" - for _, f := range this.Plugins { - repeatedStringForPlugins += strings.Replace(strings.Replace(f.String(), "Plugin", "Plugin", 1), `&`, ``, 1) + "," - } - repeatedStringForPlugins += "}" - s := strings.Join([]string{`&PluginsResponse{`, - `Plugins:` + repeatedStringForPlugins + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ServerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerResponse{`, - `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringIntrospection(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Plugin) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plugin: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Plugin: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requires", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requires = append(m.Requires, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platforms = append(m.Platforms, types.Platform{}) - if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exports == nil { - m.Exports = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthIntrospection - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthIntrospection - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Exports[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Capabilities = append(m.Capabilities, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitErr", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InitErr == nil { - m.InitErr = &rpc.Status{} - } - if err := m.InitErr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Plugins = append(m.Plugins, Plugin{}) - if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UUID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipIntrospection(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthIntrospection - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupIntrospection - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthIntrospection - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = []byte{ + 0x0a, 0x52, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x24, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, + 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x06, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 
0x66, 0x6f, + 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, + 0x12, 0x53, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e, 0x69, + 0x74, 0x5f, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x07, 0x69, 0x6e, 0x69, 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x59, 0x0a, 0x0f, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x0e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, + 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x32, 0xdf, 0x01, 0x0a, 0x0d, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x76, 0x0a, 0x07, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, + 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, + 0x6f, 0x6e, 
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, + 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74, + 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6e, + 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupIntrospection = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData = file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = []interface{}{ + (*Plugin)(nil), // 0: containerd.services.introspection.v1.Plugin + (*PluginsRequest)(nil), // 1: containerd.services.introspection.v1.PluginsRequest + (*PluginsResponse)(nil), // 2: containerd.services.introspection.v1.PluginsResponse + (*ServerResponse)(nil), // 3: containerd.services.introspection.v1.ServerResponse + nil, // 4: containerd.services.introspection.v1.Plugin.ExportsEntry + (*types.Platform)(nil), // 5: containerd.types.Platform + (*status.Status)(nil), // 6: google.rpc.Status + (*emptypb.Empty)(nil), // 7: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = []int32{ + 5, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> 
containerd.types.Platform + 4, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry + 6, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status + 0, // 3: containerd.services.introspection.v1.PluginsResponse.plugins:type_name -> containerd.services.introspection.v1.Plugin + 1, // 4: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest + 7, // 5: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty + 2, // 6: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse + 3, // 7: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse + 6, // [6:8] is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_init() +} +func file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_init() { + if File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Plugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes, + }.Build() + 
File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto = out.File + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 8427a068ea..38864f1ec6 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -21,7 +21,6 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import "google/rpc/status.proto"; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; @@ -57,7 +56,7 @@ message Plugin { // // If the plugin prefers certain platforms over others, they should be // listed from most to least preferred. - repeated types.Platform platforms = 4 [(gogoproto.nullable) = false]; + repeated types.Platform platforms = 4; // Exports allows plugins to provide values about state or configuration to // interested parties. @@ -96,9 +95,11 @@ message PluginsRequest { } message PluginsResponse { - repeated Plugin plugins = 1 [(gogoproto.nullable) = false]; + repeated Plugin plugins = 1; } message ServerResponse { - string uuid = 1 [(gogoproto.customname) = "UUID"]; + string uuid = 1; + uint64 pid = 2; + uint64 pidns = 3; // PID namespace, such as 4026531836 } diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go new file mode 100644 index 0000000000..c2cf80765c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go @@ -0,0 +1,152 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto + +package introspection + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// IntrospectionClient is the client API for Introspection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type IntrospectionClient interface { + // Plugins returns a list of plugins in containerd. + // + // Clients can use this to detect features and capabilities when using + // containerd. 
+ Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServerResponse, error) +} + +type introspectionClient struct { + cc grpc.ClientConnInterface +} + +func NewIntrospectionClient(cc grpc.ClientConnInterface) IntrospectionClient { + return &introspectionClient{cc} +} + +func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) { + out := new(PluginsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *introspectionClient) Server(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IntrospectionServer is the server API for Introspection service. +// All implementations must embed UnimplementedIntrospectionServer +// for forward compatibility +type IntrospectionServer interface { + // Plugins returns a list of plugins in containerd. + // + // Clients can use this to detect features and capabilities when using + // containerd. + Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *emptypb.Empty) (*ServerResponse, error) + mustEmbedUnimplementedIntrospectionServer() +} + +// UnimplementedIntrospectionServer must be embedded to have forward compatible implementations. +type UnimplementedIntrospectionServer struct { +} + +func (UnimplementedIntrospectionServer) Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Plugins not implemented") +} +func (UnimplementedIntrospectionServer) Server(context.Context, *emptypb.Empty) (*ServerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Server not implemented") +} +func (UnimplementedIntrospectionServer) mustEmbedUnimplementedIntrospectionServer() {} + +// UnsafeIntrospectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to IntrospectionServer will +// result in compilation errors. 
+type UnsafeIntrospectionServer interface { + mustEmbedUnimplementedIntrospectionServer() +} + +func RegisterIntrospectionServer(s grpc.ServiceRegistrar, srv IntrospectionServer) { + s.RegisterService(&Introspection_ServiceDesc, srv) +} + +func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PluginsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Plugins(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Plugins", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Plugins(ctx, req.(*PluginsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Introspection_ServiceDesc is the grpc.ServiceDesc for Introspection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Introspection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.introspection.v1.Introspection", + HandlerType: (*IntrospectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Plugins", + Handler: _Introspection_Plugins_Handler, + }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go index 5e7cab71f1..2a66f0f8b8 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go @@ -1,3108 +1,961 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/leases/v1/leases.proto package leases import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Lease is an object which retains resources while it exists. type Lease struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Lease) Reset() { + *x = Lease{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Lease) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) + +func (x *Lease) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Lease proto.InternalMessageInfo +func (*Lease) ProtoMessage() {} + +func (x *Lease) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Lease.ProtoReflect.Descriptor instead. +func (*Lease) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{0} +} + +func (x *Lease) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Lease) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Lease) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type CreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID is used to identity the lease, when the id is not set the service // generates a random identifier for the lease. - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *CreateRequest) Reset() { *m = CreateRequest{} } -func (*CreateRequest) ProtoMessage() {} -func (*CreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{1} -} -func (m *CreateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateRequest) Reset() { + *x = CreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateRequest.Merge(m, src) -} -func (m *CreateRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateRequest.DiscardUnknown(m) + +func (x *CreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateRequest proto.InternalMessageInfo +func (*CreateRequest) ProtoMessage() {} + +func (x *CreateRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateRequest.ProtoReflect.Descriptor instead. +func (*CreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CreateRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type CreateResponse struct { - Lease *Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Lease *Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"` } -func (m *CreateResponse) Reset() { *m = CreateResponse{} } -func (*CreateResponse) ProtoMessage() {} -func (*CreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{2} -} -func (m *CreateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateResponse) Reset() { + *x = CreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateResponse.Merge(m, src) -} -func (m *CreateResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateResponse.DiscardUnknown(m) + +func (x *CreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateResponse proto.InternalMessageInfo +func (*CreateResponse) ProtoMessage() {} + +func (x *CreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateResponse.ProtoReflect.Descriptor instead. 
+func (*CreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateResponse) GetLease() *Lease { + if x != nil { + return x.Lease + } + return nil +} type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Sync indicates that the delete and cleanup should be done // synchronously before returning to the caller // // Default is false - Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` } -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{3} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *DeleteRequest) GetSync() bool { + if x != nil { + return x.Sync + } + return false +} type ListRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListRequest) Reset() { *m = ListRequest{} } -func (*ListRequest) ProtoMessage() {} -func (*ListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{4} -} -func (m *ListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListRequest) Reset() { + *x = ListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListRequest.Merge(m, src) -} -func (m *ListRequest) XXX_Size() int { - return m.Size() -} -func (m *ListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListRequest.DiscardUnknown(m) + +func (x *ListRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListRequest proto.InternalMessageInfo +func (*ListRequest) ProtoMessage() {} + +func (x *ListRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. 
+func (*ListRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{4} +} + +func (x *ListRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListResponse struct { - Leases []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Leases []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"` } -func (m *ListResponse) Reset() { *m = ListResponse{} } -func (*ListResponse) ProtoMessage() {} -func (*ListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{5} -} -func (m *ListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListResponse) Reset() { + *x = ListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResponse.Merge(m, src) -} -func (m *ListResponse) XXX_Size() int { - return m.Size() -} -func (m *ListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListResponse.DiscardUnknown(m) + +func (x *ListResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListResponse proto.InternalMessageInfo +func (*ListResponse) ProtoMessage() {} + +func (x *ListResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. +func (*ListResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{5} +} + +func (x *ListResponse) GetLeases() []*Lease { + if x != nil { + return x.Leases + } + return nil +} type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // For snapshotter resource, there are many snapshotter types here, like // overlayfs, devmapper etc. The type will be formatted with type, // like "snapshotter/overlayfs". 
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` } -func (m *Resource) Reset() { *m = Resource{} } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{6} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Resource proto.InternalMessageInfo +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{6} +} + +func (x *Resource) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Resource) GetType() string { + if x != nil { + return x.Type + } + return "" +} type AddResourceRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource *Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` } -func (m *AddResourceRequest) Reset() { *m = AddResourceRequest{} } -func (*AddResourceRequest) ProtoMessage() {} -func (*AddResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{7} -} -func (m *AddResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *AddResourceRequest) Reset() { + *x = AddResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *AddResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddResourceRequest.Merge(m, src) -} -func (m *AddResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *AddResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddResourceRequest.DiscardUnknown(m) + +func (x *AddResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_AddResourceRequest proto.InternalMessageInfo +func (*AddResourceRequest) ProtoMessage() {} + +func (x *AddResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddResourceRequest.ProtoReflect.Descriptor instead. 
+func (*AddResourceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{7} +} + +func (x *AddResourceRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *AddResourceRequest) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} type DeleteResourceRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource *Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` } -func (m *DeleteResourceRequest) Reset() { *m = DeleteResourceRequest{} } -func (*DeleteResourceRequest) ProtoMessage() {} -func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{8} -} -func (m *DeleteResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteResourceRequest) Reset() { + *x = DeleteResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResourceRequest.Merge(m, src) -} -func (m *DeleteResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResourceRequest.DiscardUnknown(m) + +func (x *DeleteResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteResourceRequest proto.InternalMessageInfo +func (*DeleteResourceRequest) ProtoMessage() {} + +func (x *DeleteResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResourceRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteResourceRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *DeleteResourceRequest) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} type ListResourcesRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *ListResourcesRequest) Reset() { *m = ListResourcesRequest{} } -func (*ListResourcesRequest) ProtoMessage() {} -func (*ListResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{9} -} -func (m *ListResourcesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResourcesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListResourcesRequest) Reset() { + *x = ListResourcesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResourcesRequest.Merge(m, src) -} -func (m *ListResourcesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListResourcesRequest.DiscardUnknown(m) + +func (x *ListResourcesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListResourcesRequest proto.InternalMessageInfo +func (*ListResourcesRequest) ProtoMessage() {} + +func (x *ListResourcesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResourcesRequest.ProtoReflect.Descriptor instead. 
+func (*ListResourcesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{9} +} + +func (x *ListResourcesRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} type ListResourcesResponse struct { - Resources []Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resources []*Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *ListResourcesResponse) Reset() { + *x = ListResourcesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResourcesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListResourcesResponse) Reset() { *m = ListResourcesResponse{} } func (*ListResourcesResponse) ProtoMessage() {} + +func (x *ListResourcesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResourcesResponse.ProtoReflect.Descriptor instead. func (*ListResourcesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{10} + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{10} } -func (m *ListResourcesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResourcesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListResourcesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResourcesResponse.Merge(m, src) -} -func (m *ListResourcesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListResourcesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListResourcesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListResourcesResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Lease)(nil), "containerd.services.leases.v1.Lease") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.Lease.LabelsEntry") - proto.RegisterType((*CreateRequest)(nil), "containerd.services.leases.v1.CreateRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.CreateRequest.LabelsEntry") - proto.RegisterType((*CreateResponse)(nil), "containerd.services.leases.v1.CreateResponse") - proto.RegisterType((*DeleteRequest)(nil), "containerd.services.leases.v1.DeleteRequest") - proto.RegisterType((*ListRequest)(nil), "containerd.services.leases.v1.ListRequest") - proto.RegisterType((*ListResponse)(nil), "containerd.services.leases.v1.ListResponse") - proto.RegisterType((*Resource)(nil), "containerd.services.leases.v1.Resource") - 
proto.RegisterType((*AddResourceRequest)(nil), "containerd.services.leases.v1.AddResourceRequest") - proto.RegisterType((*DeleteResourceRequest)(nil), "containerd.services.leases.v1.DeleteResourceRequest") - proto.RegisterType((*ListResourcesRequest)(nil), "containerd.services.leases.v1.ListResourcesRequest") - proto.RegisterType((*ListResourcesResponse)(nil), "containerd.services.leases.v1.ListResourcesResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/leases/v1/leases.proto", fileDescriptor_fefd70dfe8d93cbf) -} - -var fileDescriptor_fefd70dfe8d93cbf = []byte{ - // 644 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xce, 0x26, 0xa9, 0x49, 0x26, 0xb4, 0x42, 0xab, 0xb6, 0x8a, 0x8c, 0x48, 0x22, 0x0b, 0xa9, - 0x11, 0x3f, 0x36, 0x4d, 0x2b, 0x54, 0x5a, 0x84, 0xd4, 0xb4, 0x95, 0xa8, 0x88, 0x10, 0xb2, 0x38, - 0x54, 0x1c, 0xa8, 0x1c, 0x7b, 0x1b, 0x2c, 0x9c, 0xd8, 0x78, 0x37, 0x41, 0xe9, 0x89, 0x47, 0xe0, - 0x61, 0x78, 0x88, 0x1e, 0x39, 0x21, 0x4e, 0x85, 0xe6, 0xc6, 0x5b, 0x20, 0xef, 0x0f, 0x6d, 0x5a, - 0xb5, 0x76, 0x11, 0xe2, 0x36, 0x1b, 0x7f, 0xdf, 0xcc, 0x37, 0x33, 0xdf, 0x6e, 0x60, 0xbb, 0xe7, - 0xb3, 0x77, 0xc3, 0xae, 0xe9, 0x86, 0x7d, 0xcb, 0x0d, 0x07, 0xcc, 0xf1, 0x07, 0x24, 0xf6, 0xce, - 0x86, 0x4e, 0xe4, 0x5b, 0x94, 0xc4, 0x23, 0xdf, 0x25, 0xd4, 0x0a, 0x88, 0x43, 0x09, 0xb5, 0x46, - 0xcb, 0x32, 0x32, 0xa3, 0x38, 0x64, 0x21, 0xbe, 0x73, 0x8a, 0x37, 0x15, 0xd6, 0x94, 0x88, 0xd1, - 0xb2, 0x3e, 0xdf, 0x0b, 0x7b, 0x21, 0x47, 0x5a, 0x49, 0x24, 0x48, 0xfa, 0xed, 0x5e, 0x18, 0xf6, - 0x02, 0x62, 0xf1, 0x53, 0x77, 0x78, 0x60, 0x91, 0x7e, 0xc4, 0xc6, 0xf2, 0x63, 0xfd, 0xfc, 0x47, - 0xe6, 0xf7, 0x09, 0x65, 0x4e, 0x3f, 0x12, 0x00, 0xe3, 0x17, 0x82, 0x99, 0x4e, 0x52, 0x01, 0x2f, - 0x42, 0xde, 0xf7, 0xaa, 0xa8, 0x81, 0x9a, 0xe5, 0xb6, 0x36, 0x39, 0xae, 0xe7, 0x77, 0xb7, 0xed, - 0xbc, 0xef, 0xe1, 0x2d, 0x00, 0x37, 0x26, 0x0e, 0x23, 0xde, 0xbe, 0xc3, 0xaa, 0xf9, 0x06, 0x6a, - 0x56, 0x5a, 0xba, 0x29, 0xf2, 0x9a, 0x2a, 0xaf, 0xf9, 0x5a, 0xe5, 0x6d, 0x97, 0x8e, 0x8e, 0xeb, - 0xb9, 0xcf, 0x3f, 0xea, 0xc8, 0x2e, 0x4b, 0xde, 0x26, 0xc3, 0xcf, 0x41, 0x0b, 0x9c, 0x2e, 0x09, - 0x68, 0xb5, 0xd0, 0x28, 0x34, 0x2b, 0xad, 0x47, 0xe6, 0x95, 0xad, 0x9a, 0x5c, 0x92, 0xd9, 0xe1, - 0x94, 0x9d, 0x01, 0x8b, 0xc7, 0xb6, 0xe4, 0xeb, 0x4f, 0xa0, 0x72, 0xe6, 0x67, 0x7c, 0x0b, 0x0a, - 0xef, 0xc9, 0x58, 0xc8, 0xb6, 0x93, 0x10, 0xcf, 0xc3, 0xcc, 0xc8, 0x09, 0x86, 0x84, 0x4b, 0x2d, - 0xdb, 0xe2, 0xb0, 0x9e, 0x5f, 0x43, 0xc6, 0x17, 0x04, 0xb3, 0x5b, 0x5c, 0x92, 0x4d, 0x3e, 0x0c, - 0x09, 0x65, 0x97, 0xf6, 0xfc, 0xea, 0x9c, 0xdc, 0xb5, 0x14, 0xb9, 0x53, 0x59, 0xff, 0xb5, 0xec, - 0x0e, 0xcc, 0xa9, 0xfc, 0x34, 0x0a, 0x07, 0x94, 0xe0, 0x75, 0x98, 0xe1, 0xb5, 0x39, 0xbf, 0xd2, - 0xba, 0x9b, 0x65, 0x98, 0xb6, 0xa0, 0x18, 0x1b, 0x30, 0xbb, 0x4d, 0x02, 0x92, 0x3e, 0x03, 0x0c, - 0x45, 0x3a, 0x1e, 0xb8, 0x5c, 0x4f, 0xc9, 0xe6, 0xb1, 0xb1, 0x04, 0x95, 0x8e, 0x4f, 0x99, 0xa2, - 0x56, 0xe1, 0xc6, 0x81, 0x1f, 0x30, 0x12, 0xd3, 0x2a, 0x6a, 0x14, 0x9a, 0x65, 0x5b, 0x1d, 0x8d, - 0x0e, 0xdc, 0x14, 0x40, 0xa9, 0xf8, 0x29, 0x68, 0x42, 0x0f, 0x07, 0x66, 0x95, 0x2c, 0x39, 0xc6, - 0x63, 0x28, 0xd9, 0x84, 0x86, 0xc3, 0xd8, 0x25, 0x57, 0xc9, 0x65, 0xe3, 0x48, 0x8d, 0x8f, 0xc7, - 0xc6, 0x47, 0xc0, 0x9b, 0x9e, 0xa7, 0xa8, 0x69, 0x0d, 0xef, 0x42, 0x29, 0x96, 0x50, 0x69, 0xf3, - 0xa5, 0x14, 0x95, 0x2a, 0x73, 0xbb, 0x98, 0x78, 0xde, 0xfe, 0x43, 0x37, 0x0e, 0x61, 0x41, 0x0d, - 0xf9, 0xbf, 0xd7, 0x36, 0x61, 0x5e, 0x8e, 
0x9e, 0x9f, 0x69, 0x4a, 0x69, 0xc3, 0x83, 0x85, 0x73, - 0x78, 0xb9, 0xb3, 0x17, 0x50, 0x56, 0x49, 0xd5, 0xda, 0xae, 0x29, 0xea, 0x94, 0xdf, 0xfa, 0x56, - 0x04, 0x8d, 0x2f, 0x95, 0x62, 0x02, 0x9a, 0xf0, 0x33, 0x7e, 0x70, 0x9d, 0x6b, 0xa5, 0x3f, 0xcc, - 0x88, 0x96, 0xf2, 0x5f, 0x82, 0x26, 0x76, 0x90, 0x5a, 0x66, 0xea, 0x3e, 0xe8, 0x8b, 0x17, 0xde, - 0xb6, 0x9d, 0xe4, 0x41, 0xc5, 0xfb, 0x50, 0x4c, 0xe6, 0x84, 0xef, 0xa5, 0x59, 0xf7, 0xf4, 0x82, - 0xe8, 0xf7, 0x33, 0x61, 0xa5, 0xe0, 0x3d, 0xa8, 0x9c, 0x71, 0x2b, 0x5e, 0x4e, 0xe1, 0x5e, 0x74, - 0xf6, 0xa5, 0xd2, 0xdf, 0xc2, 0xdc, 0xb4, 0x1d, 0xf1, 0x6a, 0xc6, 0x91, 0x64, 0xcb, 0x7f, 0x08, - 0xb3, 0x53, 0x16, 0xc2, 0x2b, 0xd9, 0xfa, 0x9e, 0x32, 0xa8, 0xbe, 0x7a, 0x3d, 0x92, 0x98, 0x5a, - 0x7b, 0xef, 0xe8, 0xa4, 0x96, 0xfb, 0x7e, 0x52, 0xcb, 0x7d, 0x9a, 0xd4, 0xd0, 0xd1, 0xa4, 0x86, - 0xbe, 0x4e, 0x6a, 0xe8, 0xe7, 0xa4, 0x86, 0xde, 0x3c, 0xfb, 0xcb, 0xff, 0xe4, 0x0d, 0x11, 0xed, - 0xe5, 0xba, 0x1a, 0xef, 0x73, 0xe5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xfe, 0x39, 0x67, - 0xde, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LeasesClient is the client API for Leases service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LeasesClient interface { - // Create creates a new lease for managing changes to metadata. A lease - // can be used to protect objects from being removed. - Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) - // Delete deletes the lease and makes any unreferenced objects created - // during the lease eligible for garbage collection if not referenced - // or retained by other resources during the lease. - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) - // List lists all active leases, returning the full list of - // leases and optionally including the referenced resources. - List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) - // AddResource references the resource by the provided lease. - AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) - // DeleteResource dereferences the resource by the provided lease. - DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) - // ListResources lists all the resources referenced by the lease. - ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) -} - -type leasesClient struct { - cc *grpc.ClientConn -} - -func NewLeasesClient(cc *grpc.ClientConn) LeasesClient { - return &leasesClient{cc} -} - -func (c *leasesClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { - out := new(CreateResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { - out := new(ListResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { - out := new(ListResourcesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LeasesServer is the server API for Leases service. -type LeasesServer interface { - // Create creates a new lease for managing changes to metadata. A lease - // can be used to protect objects from being removed. - Create(context.Context, *CreateRequest) (*CreateResponse, error) - // Delete deletes the lease and makes any unreferenced objects created - // during the lease eligible for garbage collection if not referenced - // or retained by other resources during the lease. - Delete(context.Context, *DeleteRequest) (*types.Empty, error) - // List lists all active leases, returning the full list of - // leases and optionally including the referenced resources. - List(context.Context, *ListRequest) (*ListResponse, error) - // AddResource references the resource by the provided lease. - AddResource(context.Context, *AddResourceRequest) (*types.Empty, error) - // DeleteResource dereferences the resource by the provided lease. - DeleteResource(context.Context, *DeleteResourceRequest) (*types.Empty, error) - // ListResources lists all the resources referenced by the lease. - ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) -} - -// UnimplementedLeasesServer can be embedded to have forward compatible implementations. 
-type UnimplementedLeasesServer struct { -} - -func (*UnimplementedLeasesServer) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedLeasesServer) Delete(ctx context.Context, req *DeleteRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedLeasesServer) List(ctx context.Context, req *ListRequest) (*ListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedLeasesServer) AddResource(ctx context.Context, req *AddResourceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddResource not implemented") -} -func (*UnimplementedLeasesServer) DeleteResource(ctx context.Context, req *DeleteResourceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented") -} -func (*UnimplementedLeasesServer) ListResources(ctx context.Context, req *ListResourcesRequest) (*ListResourcesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented") -} - -func RegisterLeasesServer(s *grpc.Server, srv LeasesServer) { - s.RegisterService(&_Leases_serviceDesc, srv) -} - -func _Leases_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).Create(ctx, req.(*CreateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Leases_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).List(ctx, req.(*ListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(LeasesServer).AddResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/AddResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).DeleteResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/DeleteResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListResourcesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).ListResources(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/ListResources", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Leases_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.leases.v1.Leases", - HandlerType: (*LeasesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _Leases_Create_Handler, - }, - { - MethodName: "Delete", - Handler: _Leases_Delete_Handler, - }, - { - MethodName: "List", - Handler: _Leases_List_Handler, - }, - { - MethodName: "AddResource", - Handler: _Leases_AddResource_Handler, - }, - { - MethodName: "DeleteResource", - Handler: _Leases_DeleteResource_Handler, - }, - { - MethodName: "ListResources", - Handler: _Leases_ListResources_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/leases/v1/leases.proto", -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintLeases(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintLeases(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintLeases(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err1 != nil { - return 
0, err1 - } - i -= n1 - i = encodeVarintLeases(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintLeases(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintLeases(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintLeases(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Lease != nil { - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Sync { - i-- - if m.Sync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - 
if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintLeases(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintLeases(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteResourceRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { - offset -= sovLeases(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovLeases(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateResponse) Size() (n int) 
{ - if m == nil { - return 0 - } - var l int - _ = l - if m.Lease != nil { - l = m.Lease.Size() - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.Sync { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Resource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AddResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = m.Resource.Size() - n += 1 + l + sovLeases(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = m.Resource.Size() - n += 1 + l + sovLeases(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResourcesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovLeases(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLeases(x uint64) (n int) { - return sovLeases(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Lease) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Lease{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - 
return s -} -func (this *CreateRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&CreateRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateResponse{`, - `Lease:` + strings.Replace(this.Lease.String(), "Lease", "Lease", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Sync:` + fmt.Sprintf("%v", this.Sync) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForLeases := "[]*Lease{" - for _, f := range this.Leases { - repeatedStringForLeases += strings.Replace(f.String(), "Lease", "Lease", 1) + "," - } - repeatedStringForLeases += "}" - s := strings.Join([]string{`&ListResponse{`, - `Leases:` + repeatedStringForLeases + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Resource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Resource{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AddResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddResourceRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteResourceRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListResourcesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListResourcesRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this 
*ListResourcesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForResources := "[]Resource{" - for _, f := range this.Resources { - repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "Resource", "Resource", 1), `&`, ``, 1) + "," - } - repeatedStringForResources += "}" - s := strings.Join([]string{`&ListResourcesResponse{`, - `Resources:` + repeatedStringForResources + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringLeases(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ListResourcesResponse) GetResources() []*Resource { + if x != nil { + return x.Resources } return nil } -func (m *CreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lease == nil { - m.Lease = &Lease{} - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_services_leases_v1_leases_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, &Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResourcesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResourcesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, Resource{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLeases(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLeases - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLeases - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLeases - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd7, 0x01, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 
0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 0x01, + 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, + 0x61, 0x73, 0x65, 0x52, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x0d, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x22, + 0x27, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x06, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x22, 0x2e, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x69, 0x0a, 0x12, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x22, 0x6c, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0x26, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x5e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x32, 0xd6, 0x04, 0x0a, 0x06, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x65, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x04, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 
0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, + 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x7a, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthLeases = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLeases = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLeases = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData = file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes = 
make([]protoimpl.MessageInfo, 13) +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes = []interface{}{ + (*Lease)(nil), // 0: containerd.services.leases.v1.Lease + (*CreateRequest)(nil), // 1: containerd.services.leases.v1.CreateRequest + (*CreateResponse)(nil), // 2: containerd.services.leases.v1.CreateResponse + (*DeleteRequest)(nil), // 3: containerd.services.leases.v1.DeleteRequest + (*ListRequest)(nil), // 4: containerd.services.leases.v1.ListRequest + (*ListResponse)(nil), // 5: containerd.services.leases.v1.ListResponse + (*Resource)(nil), // 6: containerd.services.leases.v1.Resource + (*AddResourceRequest)(nil), // 7: containerd.services.leases.v1.AddResourceRequest + (*DeleteResourceRequest)(nil), // 8: containerd.services.leases.v1.DeleteResourceRequest + (*ListResourcesRequest)(nil), // 9: containerd.services.leases.v1.ListResourcesRequest + (*ListResourcesResponse)(nil), // 10: containerd.services.leases.v1.ListResourcesResponse + nil, // 11: containerd.services.leases.v1.Lease.LabelsEntry + nil, // 12: containerd.services.leases.v1.CreateRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs = []int32{ + 13, // 0: containerd.services.leases.v1.Lease.created_at:type_name -> google.protobuf.Timestamp + 11, // 1: containerd.services.leases.v1.Lease.labels:type_name -> containerd.services.leases.v1.Lease.LabelsEntry + 12, // 2: containerd.services.leases.v1.CreateRequest.labels:type_name -> containerd.services.leases.v1.CreateRequest.LabelsEntry + 0, // 3: containerd.services.leases.v1.CreateResponse.lease:type_name -> containerd.services.leases.v1.Lease + 0, // 4: containerd.services.leases.v1.ListResponse.leases:type_name -> containerd.services.leases.v1.Lease + 6, // 5: containerd.services.leases.v1.AddResourceRequest.resource:type_name -> containerd.services.leases.v1.Resource + 6, // 6: containerd.services.leases.v1.DeleteResourceRequest.resource:type_name -> containerd.services.leases.v1.Resource + 6, // 7: containerd.services.leases.v1.ListResourcesResponse.resources:type_name -> containerd.services.leases.v1.Resource + 1, // 8: containerd.services.leases.v1.Leases.Create:input_type -> containerd.services.leases.v1.CreateRequest + 3, // 9: containerd.services.leases.v1.Leases.Delete:input_type -> containerd.services.leases.v1.DeleteRequest + 4, // 10: containerd.services.leases.v1.Leases.List:input_type -> containerd.services.leases.v1.ListRequest + 7, // 11: containerd.services.leases.v1.Leases.AddResource:input_type -> containerd.services.leases.v1.AddResourceRequest + 8, // 12: containerd.services.leases.v1.Leases.DeleteResource:input_type -> containerd.services.leases.v1.DeleteResourceRequest + 9, // 13: containerd.services.leases.v1.Leases.ListResources:input_type -> containerd.services.leases.v1.ListResourcesRequest + 2, // 14: containerd.services.leases.v1.Leases.Create:output_type -> containerd.services.leases.v1.CreateResponse + 14, // 15: containerd.services.leases.v1.Leases.Delete:output_type -> google.protobuf.Empty + 5, // 16: containerd.services.leases.v1.Leases.List:output_type -> containerd.services.leases.v1.ListResponse + 14, // 17: containerd.services.leases.v1.Leases.AddResource:output_type -> google.protobuf.Empty + 14, // 18: containerd.services.leases.v1.Leases.DeleteResource:output_type -> google.protobuf.Empty + 10, // 19: 
containerd.services.leases.v1.Leases.ListResources:output_type -> containerd.services.leases.v1.ListResourcesResponse + 14, // [14:20] is the sub-list for method output_type + 8, // [8:14] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_init() } +func file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_init() { + if File_github_com_containerd_containerd_api_services_leases_v1_leases_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Lease); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_leases_v1_leases_proto = out.File + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto index 6aa61faedf..8551fcea7f 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto @@ -17,7 +17,6 @@ syntax = "proto3"; package containerd.services.leases.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -52,7 +51,7 @@ service Leases { message Lease { string id = 1; - google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 2; map labels = 3; } @@ -99,13 +98,13 @@ message Resource { message AddResourceRequest { string id = 1; - Resource resource = 2 [(gogoproto.nullable) = false]; + Resource resource = 2; } message DeleteResourceRequest { string id = 1; - Resource resource = 2 [(gogoproto.nullable) = false]; + Resource resource = 2; } message ListResourcesRequest { @@ -113,5 +112,5 @@ message ListResourcesRequest { } message ListResourcesResponse { - repeated Resource resources = 1 [(gogoproto.nullable) = false]; + repeated Resource resources = 1 ; } diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go new file mode 100644 index 0000000000..1ecf91ecf1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go @@ -0,0 +1,306 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/leases/v1/leases.proto + +package leases + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LeasesClient is the client API for Leases service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LeasesClient interface { + // Create creates a new lease for managing changes to metadata. A lease + // can be used to protect objects from being removed. + Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) + // Delete deletes the lease and makes any unreferenced objects created + // during the lease eligible for garbage collection if not referenced + // or retained by other resources during the lease. + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // List lists all active leases, returning the full list of + // leases and optionally including the referenced resources. + List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) +} + +type leasesClient struct { + cc grpc.ClientConnInterface +} + +func NewLeasesClient(cc grpc.ClientConnInterface) LeasesClient { + return &leasesClient{cc} +} + +func (c *leasesClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { + out := new(CreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { + out := new(ListResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LeasesServer is the server API for Leases service. +// All implementations must embed UnimplementedLeasesServer +// for forward compatibility +type LeasesServer interface { + // Create creates a new lease for managing changes to metadata. A lease + // can be used to protect objects from being removed. + Create(context.Context, *CreateRequest) (*CreateResponse, error) + // Delete deletes the lease and makes any unreferenced objects created + // during the lease eligible for garbage collection if not referenced + // or retained by other resources during the lease. + Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error) + // List lists all active leases, returning the full list of + // leases and optionally including the referenced resources. + List(context.Context, *ListRequest) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(context.Context, *AddResourceRequest) (*emptypb.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(context.Context, *DeleteResourceRequest) (*emptypb.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) + mustEmbedUnimplementedLeasesServer() +} + +// UnimplementedLeasesServer must be embedded to have forward compatible implementations. +type UnimplementedLeasesServer struct { +} + +func (UnimplementedLeasesServer) Create(context.Context, *CreateRequest) (*CreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedLeasesServer) Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedLeasesServer) List(context.Context, *ListRequest) (*ListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedLeasesServer) AddResource(context.Context, *AddResourceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddResource not implemented") +} +func (UnimplementedLeasesServer) DeleteResource(context.Context, *DeleteResourceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented") +} +func (UnimplementedLeasesServer) ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented") +} +func (UnimplementedLeasesServer) mustEmbedUnimplementedLeasesServer() {} + +// UnsafeLeasesServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to LeasesServer will +// result in compilation errors. +type UnsafeLeasesServer interface { + mustEmbedUnimplementedLeasesServer() +} + +func RegisterLeasesServer(s grpc.ServiceRegistrar, srv LeasesServer) { + s.RegisterService(&Leases_ServiceDesc, srv) +} + +func _Leases_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).Create(ctx, req.(*CreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).List(ctx, req.(*ListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).AddResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/AddResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).DeleteResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/DeleteResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/ListResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Leases_ServiceDesc is the grpc.ServiceDesc for Leases service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Leases_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.leases.v1.Leases", + HandlerType: (*LeasesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Leases_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _Leases_Delete_Handler, + }, + { + MethodName: "List", + Handler: _Leases_List_Handler, + }, + { + MethodName: "AddResource", + Handler: _Leases_AddResource_Handler, + }, + { + MethodName: "DeleteResource", + Handler: _Leases_DeleteResource_Handler, + }, + { + MethodName: "ListResources", + Handler: _Leases_ListResources_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/leases/v1/leases.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go index 76f9e11726..a75a315c43 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go @@ -1,36 +1,47 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto package namespaces import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Namespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Labels provides an area to include arbitrary data on namespaces. // @@ -38,277 +49,336 @@ type Namespace struct { // // Note that to add a new value to this field, read the existing set and // include the entire result in the update call. 
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{0} -} -func (m *Namespace) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Namespace.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Namespace) Reset() { + *x = Namespace{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Namespace) XXX_Merge(src proto.Message) { - xxx_messageInfo_Namespace.Merge(m, src) -} -func (m *Namespace) XXX_Size() int { - return m.Size() -} -func (m *Namespace) XXX_DiscardUnknown() { - xxx_messageInfo_Namespace.DiscardUnknown(m) + +func (x *Namespace) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Namespace proto.InternalMessageInfo +func (*Namespace) ProtoMessage() {} + +func (x *Namespace) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. 
+func (*Namespace) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{0} +} + +func (x *Namespace) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Namespace) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type GetNamespaceRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *GetNamespaceRequest) Reset() { *m = GetNamespaceRequest{} } -func (*GetNamespaceRequest) ProtoMessage() {} -func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{1} -} -func (m *GetNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetNamespaceRequest) Reset() { + *x = GetNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNamespaceRequest.Merge(m, src) -} -func (m *GetNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *GetNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNamespaceRequest.DiscardUnknown(m) + +func (x *GetNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetNamespaceRequest proto.InternalMessageInfo +func (*GetNamespaceRequest) ProtoMessage() {} + +func (x *GetNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{1} +} + +func (x *GetNamespaceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} type GetNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *GetNamespaceResponse) Reset() { *m = GetNamespaceResponse{} } -func (*GetNamespaceResponse) ProtoMessage() {} -func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{2} -} -func (m *GetNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetNamespaceResponse) Reset() { + *x = GetNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNamespaceResponse.Merge(m, src) -} -func (m *GetNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *GetNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNamespaceResponse.DiscardUnknown(m) + +func (x *GetNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetNamespaceResponse proto.InternalMessageInfo +func (*GetNamespaceResponse) ProtoMessage() {} + +func (x *GetNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespaceResponse.ProtoReflect.Descriptor instead. 
+func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{2} +} + +func (x *GetNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace + } + return nil +} type ListNamespacesRequest struct { - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` } -func (m *ListNamespacesRequest) Reset() { *m = ListNamespacesRequest{} } -func (*ListNamespacesRequest) ProtoMessage() {} -func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{3} -} -func (m *ListNamespacesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListNamespacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListNamespacesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListNamespacesRequest) Reset() { + *x = ListNamespacesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListNamespacesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListNamespacesRequest.Merge(m, src) -} -func (m *ListNamespacesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListNamespacesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListNamespacesRequest.DiscardUnknown(m) + +func (x *ListNamespacesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListNamespacesRequest proto.InternalMessageInfo +func (*ListNamespacesRequest) ProtoMessage() {} + +func (x *ListNamespacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNamespacesRequest.ProtoReflect.Descriptor instead. 
+func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{3} +} + +func (x *ListNamespacesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} type ListNamespacesResponse struct { - Namespaces []Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"` } -func (m *ListNamespacesResponse) Reset() { *m = ListNamespacesResponse{} } -func (*ListNamespacesResponse) ProtoMessage() {} -func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{4} -} -func (m *ListNamespacesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListNamespacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListNamespacesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListNamespacesResponse) Reset() { + *x = ListNamespacesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListNamespacesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListNamespacesResponse.Merge(m, src) -} -func (m *ListNamespacesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListNamespacesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListNamespacesResponse.DiscardUnknown(m) + +func (x *ListNamespacesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListNamespacesResponse proto.InternalMessageInfo +func (*ListNamespacesResponse) ProtoMessage() {} + +func (x *ListNamespacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNamespacesResponse.ProtoReflect.Descriptor instead. 
+func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{4} +} + +func (x *ListNamespacesResponse) GetNamespaces() []*Namespace { + if x != nil { + return x.Namespaces + } + return nil +} type CreateNamespaceRequest struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *CreateNamespaceRequest) Reset() { *m = CreateNamespaceRequest{} } -func (*CreateNamespaceRequest) ProtoMessage() {} -func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{5} -} -func (m *CreateNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateNamespaceRequest) Reset() { + *x = CreateNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateNamespaceRequest.Merge(m, src) -} -func (m *CreateNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateNamespaceRequest.DiscardUnknown(m) + +func (x *CreateNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateNamespaceRequest proto.InternalMessageInfo +func (*CreateNamespaceRequest) ProtoMessage() {} + +func (x *CreateNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateNamespaceRequest) GetNamespace() *Namespace { + if x != nil { + return x.Namespace + } + return nil +} type CreateNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *CreateNamespaceResponse) Reset() { *m = CreateNamespaceResponse{} } -func (*CreateNamespaceResponse) ProtoMessage() {} -func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{6} -} -func (m *CreateNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateNamespaceResponse) Reset() { + *x = CreateNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateNamespaceResponse.Merge(m, src) -} -func (m *CreateNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateNamespaceResponse.DiscardUnknown(m) + +func (x *CreateNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateNamespaceResponse proto.InternalMessageInfo +func (*CreateNamespaceResponse) ProtoMessage() {} + +func (x *CreateNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNamespaceResponse.ProtoReflect.Descriptor instead. +func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace + } + return nil +} // UpdateNamespaceRequest updates the metadata for a namespace. // @@ -316,2203 +386,482 @@ var xxx_messageInfo_CreateNamespaceResponse proto.InternalMessageInfo // https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, // unless otherwise qualified. type UpdateNamespaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Namespace provides the target value, as declared by the mask, for the update. 
// // The namespace field must be set. - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // For the most part, this applies only to selectively updating labels on // the namespace. While field masks are typically limited to ascii alphas // and digits, we just take everything after the "labels." as the map key. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *UpdateNamespaceRequest) Reset() { *m = UpdateNamespaceRequest{} } -func (*UpdateNamespaceRequest) ProtoMessage() {} -func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{7} -} -func (m *UpdateNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateNamespaceRequest) Reset() { + *x = UpdateNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateNamespaceRequest.Merge(m, src) -} -func (m *UpdateNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateNamespaceRequest.DiscardUnknown(m) + +func (x *UpdateNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateNamespaceRequest proto.InternalMessageInfo +func (*UpdateNamespaceRequest) ProtoMessage() {} + +func (x *UpdateNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateNamespaceRequest) GetNamespace() *Namespace { + if x != nil { + return x.Namespace + } + return nil +} + +func (x *UpdateNamespaceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *UpdateNamespaceResponse) Reset() { *m = UpdateNamespaceResponse{} } -func (*UpdateNamespaceResponse) ProtoMessage() {} -func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{8} -} -func (m *UpdateNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateNamespaceResponse) Reset() { + *x = UpdateNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateNamespaceResponse.Merge(m, src) -} -func (m *UpdateNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateNamespaceResponse.DiscardUnknown(m) + +func (x *UpdateNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateNamespaceResponse proto.InternalMessageInfo +func (*UpdateNamespaceResponse) ProtoMessage() {} + +func (x *UpdateNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNamespaceResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace + } + return nil +} type DeleteNamespaceRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteNamespaceRequest) Reset() { + *x = DeleteNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteNamespaceRequest) Reset() { *m = DeleteNamespaceRequest{} } func (*DeleteNamespaceRequest) ProtoMessage() {} + +func (x *DeleteNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNamespaceRequest.ProtoReflect.Descriptor instead. func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{9} -} -func (m *DeleteNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNamespaceRequest.Merge(m, src) -} -func (m *DeleteNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNamespaceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteNamespaceRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.namespaces.v1.Namespace.LabelsEntry") - proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest") - proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse") - proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest") - proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.services.namespaces.v1.ListNamespacesResponse") - proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.services.namespaces.v1.CreateNamespaceRequest") - proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.services.namespaces.v1.CreateNamespaceResponse") - 
proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.services.namespaces.v1.UpdateNamespaceRequest") - proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.services.namespaces.v1.UpdateNamespaceResponse") - proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptor_8c41761eaeea4fd3) -} - -var fileDescriptor_8c41761eaeea4fd3 = []byte{ - // 551 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15, - 0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58, - 0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e, - 0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca, - 0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f, - 0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b, - 0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d, - 0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb, - 0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25, - 0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd, - 0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0, - 0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e, - 0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09, - 0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18, - 0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, - 0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50, - 0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, - 0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18, - 0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, - 0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, - 0x5c, 0xf8, 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5, - 0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34, - 0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, - 0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, - 0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92, - 0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12, - 0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, - 0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, - 0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, - 0x09, 0xf8, 0x95, 
0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, - 0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6, - 0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f, - 0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f, - 0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// NamespacesClient is the client API for Namespaces service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NamespacesClient interface { - Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) - List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) - Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) - Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) - Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type namespacesClient struct { - cc *grpc.ClientConn -} - -func NewNamespacesClient(cc *grpc.ClientConn) NamespacesClient { - return &namespacesClient{cc} -} - -func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) { - out := new(GetNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) { - out := new(ListNamespacesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) { - out := new(CreateNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) { - out := new(UpdateNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NamespacesServer is the server API for Namespaces service. 
-type NamespacesServer interface { - Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) - List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) - Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) - Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) - Delete(context.Context, *DeleteNamespaceRequest) (*types.Empty, error) -} - -// UnimplementedNamespacesServer can be embedded to have forward compatible implementations. -type UnimplementedNamespacesServer struct { -} - -func (*UnimplementedNamespacesServer) Get(ctx context.Context, req *GetNamespaceRequest) (*GetNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedNamespacesServer) List(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedNamespacesServer) Create(ctx context.Context, req *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedNamespacesServer) Update(ctx context.Context, req *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedNamespacesServer) Delete(ctx context.Context, req *DeleteNamespaceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} - -func RegisterNamespacesServer(s *grpc.Server, srv NamespacesServer) { - s.RegisterService(&_Namespaces_serviceDesc, srv) -} - -func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNamespacesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create", - } - handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Namespaces_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.namespaces.v1.Namespaces", - HandlerType: (*NamespacesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Namespaces_Get_Handler, - }, - { - MethodName: "List", - Handler: _Namespaces_List_Handler, - }, - { - MethodName: "Create", - Handler: _Namespaces_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Namespaces_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Namespaces_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", -} - -func (m *Namespace) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *GetNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListNamespacesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListNamespacesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListNamespacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListNamespacesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListNamespacesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListNamespacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Namespaces) > 0 { - for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CreateNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CreateNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeleteNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { - offset -= sovNamespace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Namespace) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetNamespaceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListNamespacesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListNamespacesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Namespaces) > 0 { - for _, e := range m.Namespaces { - l = e.Size() - n += 1 + l + sovNamespace(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateNamespaceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateNamespaceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovNamespace(x uint64) (n int) { - 
return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNamespace(x uint64) (n int) { - return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Namespace) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Namespace{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetNamespaceRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListNamespacesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListNamespacesRequest{`, - `Filter:` + fmt.Sprintf("%v", this.Filter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListNamespacesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForNamespaces := "[]Namespace{" - for _, f := range this.Namespaces { - repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," - } - repeatedStringForNamespaces += "}" - s := strings.Join([]string{`&ListNamespacesResponse{`, - `Namespaces:` + repeatedStringForNamespaces + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateNamespaceRequest{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateNamespaceRequest{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", 
this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteNamespaceRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringNamespace(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Namespace) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListNamespacesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListNamespacesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListNamespacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListNamespacesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListNamespacesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListNamespacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespaces = append(m.Namespaces, Namespace{}) - if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNamespace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNamespace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNamespace - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNamespace - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteNamespaceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xac, 0x01, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 
0x61, 0x63, 0x65, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x29, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x62, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2f, 0x0a, + 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x66, + 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x65, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 
0x61, 0x63, 0x65, 0x22, 0xa1, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, + 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x65, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2c, + 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xe0, 0x04, 0x0a, + 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x76, 0x0a, 0x03, 0x47, + 0x65, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x7f, 0x0a, 0x06, 
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x7f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData = file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes = []interface{}{ + (*Namespace)(nil), // 0: containerd.services.namespaces.v1.Namespace + (*GetNamespaceRequest)(nil), // 1: containerd.services.namespaces.v1.GetNamespaceRequest + (*GetNamespaceResponse)(nil), // 2: containerd.services.namespaces.v1.GetNamespaceResponse + (*ListNamespacesRequest)(nil), // 3: containerd.services.namespaces.v1.ListNamespacesRequest + (*ListNamespacesResponse)(nil), // 4: containerd.services.namespaces.v1.ListNamespacesResponse + (*CreateNamespaceRequest)(nil), // 5: containerd.services.namespaces.v1.CreateNamespaceRequest + (*CreateNamespaceResponse)(nil), // 6: containerd.services.namespaces.v1.CreateNamespaceResponse + (*UpdateNamespaceRequest)(nil), // 7: containerd.services.namespaces.v1.UpdateNamespaceRequest + (*UpdateNamespaceResponse)(nil), // 8: containerd.services.namespaces.v1.UpdateNamespaceResponse + (*DeleteNamespaceRequest)(nil), // 9: containerd.services.namespaces.v1.DeleteNamespaceRequest + nil, // 10: containerd.services.namespaces.v1.Namespace.LabelsEntry + (*fieldmaskpb.FieldMask)(nil), // 11: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 12: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs = []int32{ + 10, // 0: containerd.services.namespaces.v1.Namespace.labels:type_name -> containerd.services.namespaces.v1.Namespace.LabelsEntry + 0, // 1: containerd.services.namespaces.v1.GetNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 2: containerd.services.namespaces.v1.ListNamespacesResponse.namespaces:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 3: containerd.services.namespaces.v1.CreateNamespaceRequest.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 4: containerd.services.namespaces.v1.CreateNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 5: containerd.services.namespaces.v1.UpdateNamespaceRequest.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 11, // 6: containerd.services.namespaces.v1.UpdateNamespaceRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 7: containerd.services.namespaces.v1.UpdateNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 1, // 8: containerd.services.namespaces.v1.Namespaces.Get:input_type -> containerd.services.namespaces.v1.GetNamespaceRequest + 3, // 9: containerd.services.namespaces.v1.Namespaces.List:input_type -> containerd.services.namespaces.v1.ListNamespacesRequest + 5, // 10: containerd.services.namespaces.v1.Namespaces.Create:input_type -> containerd.services.namespaces.v1.CreateNamespaceRequest + 7, // 11: containerd.services.namespaces.v1.Namespaces.Update:input_type -> containerd.services.namespaces.v1.UpdateNamespaceRequest + 9, // 12: containerd.services.namespaces.v1.Namespaces.Delete:input_type -> containerd.services.namespaces.v1.DeleteNamespaceRequest + 2, // 13: containerd.services.namespaces.v1.Namespaces.Get:output_type -> 
containerd.services.namespaces.v1.GetNamespaceResponse + 4, // 14: containerd.services.namespaces.v1.Namespaces.List:output_type -> containerd.services.namespaces.v1.ListNamespacesResponse + 6, // 15: containerd.services.namespaces.v1.Namespaces.Create:output_type -> containerd.services.namespaces.v1.CreateNamespaceResponse + 8, // 16: containerd.services.namespaces.v1.Namespaces.Update:output_type -> containerd.services.namespaces.v1.UpdateNamespaceResponse + 12, // 17: containerd.services.namespaces.v1.Namespaces.Delete:output_type -> google.protobuf.Empty + 13, // [13:18] is the sub-list for method output_type + 8, // [8:13] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_init() } +func file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_init() { + if File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Namespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*UpdateNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto = out.File + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto index 90e3051238..910bcd6c72 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.namespaces.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; @@ -60,7 +59,7 @@ message GetNamespaceRequest { } message GetNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message ListNamespacesRequest { @@ -68,15 +67,15 @@ message ListNamespacesRequest { } message ListNamespacesResponse { - repeated Namespace namespaces = 1 [(gogoproto.nullable) = false]; + repeated Namespace namespaces = 1; } message CreateNamespaceRequest { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message CreateNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } // UpdateNamespaceRequest updates the metadata for a namespace. @@ -88,7 +87,7 @@ message UpdateNamespaceRequest { // Namespace provides the target value, as declared by the mask, for the update. // // The namespace field must be set. - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. 
@@ -100,7 +99,7 @@ message UpdateNamespaceRequest { } message UpdateNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message DeleteNamespaceRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go new file mode 100644 index 0000000000..ed4e4c2ffe --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go @@ -0,0 +1,250 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto + +package namespaces + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// NamespacesClient is the client API for Namespaces service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type NamespacesClient interface { + Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) + List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) + Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) + Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) + Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type namespacesClient struct { + cc grpc.ClientConnInterface +} + +func NewNamespacesClient(cc grpc.ClientConnInterface) NamespacesClient { + return &namespacesClient{cc} +} + +func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) { + out := new(GetNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) { + out := new(ListNamespacesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) { + out := new(CreateNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) { + out := new(UpdateNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NamespacesServer is the server API for Namespaces service. +// All implementations must embed UnimplementedNamespacesServer +// for forward compatibility +type NamespacesServer interface { + Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) + List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) + Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) + Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) + Delete(context.Context, *DeleteNamespaceRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedNamespacesServer() +} + +// UnimplementedNamespacesServer must be embedded to have forward compatible implementations. +type UnimplementedNamespacesServer struct { +} + +func (UnimplementedNamespacesServer) Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedNamespacesServer) List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedNamespacesServer) Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedNamespacesServer) Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedNamespacesServer) Delete(context.Context, *DeleteNamespaceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedNamespacesServer) mustEmbedUnimplementedNamespacesServer() {} + +// UnsafeNamespacesServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NamespacesServer will +// result in compilation errors. 
+type UnsafeNamespacesServer interface { + mustEmbedUnimplementedNamespacesServer() +} + +func RegisterNamespacesServer(s grpc.ServiceRegistrar, srv NamespacesServer) { + s.RegisterService(&Namespaces_ServiceDesc, srv) +} + +func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNamespacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Namespaces_ServiceDesc is the 
grpc.ServiceDesc for Namespaces service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Namespaces_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.namespaces.v1.Namespaces", + HandlerType: (*NamespacesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Namespaces_Get_Handler, + }, + { + MethodName: "List", + Handler: _Namespaces_List_Handler, + }, + { + MethodName: "Create", + Handler: _Namespaces_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Namespaces_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Namespaces_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go new file mode 100644 index 0000000000..f960350c16 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go new file mode 100644 index 0000000000..49081aa8ac --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go @@ -0,0 +1,1948 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto + +// Sandbox is a v2 runtime extension that allows more complex execution environments for containers. +// This adds a notion of groups of containers that share same lifecycle and/or resources. +// A few good fits for sandbox can be: +// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace. +// - (micro)VMs that launch a VM process and executes containers inside guest OS. +// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes. 
+// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131 + +package sandbox + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StoreCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreCreateRequest) Reset() { + *x = StoreCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreCreateRequest) ProtoMessage() {} + +func (x *StoreCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreCreateRequest.ProtoReflect.Descriptor instead. +func (*StoreCreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *StoreCreateRequest) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreCreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreCreateResponse) Reset() { + *x = StoreCreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreCreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreCreateResponse) ProtoMessage() {} + +func (x *StoreCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreCreateResponse.ProtoReflect.Descriptor instead. 
+func (*StoreCreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{1} +} + +func (x *StoreCreateResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` + Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *StoreUpdateRequest) Reset() { + *x = StoreUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreUpdateRequest) ProtoMessage() {} + +func (x *StoreUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreUpdateRequest.ProtoReflect.Descriptor instead. +func (*StoreUpdateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *StoreUpdateRequest) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +func (x *StoreUpdateRequest) GetFields() []string { + if x != nil { + return x.Fields + } + return nil +} + +type StoreUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreUpdateResponse) Reset() { + *x = StoreUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreUpdateResponse) ProtoMessage() {} + +func (x *StoreUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreUpdateResponse.ProtoReflect.Descriptor instead. 
+func (*StoreUpdateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{3} +} + +func (x *StoreUpdateResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StoreDeleteRequest) Reset() { + *x = StoreDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreDeleteRequest) ProtoMessage() {} + +func (x *StoreDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreDeleteRequest.ProtoReflect.Descriptor instead. +func (*StoreDeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{4} +} + +func (x *StoreDeleteRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StoreDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StoreDeleteResponse) Reset() { + *x = StoreDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreDeleteResponse) ProtoMessage() {} + +func (x *StoreDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*StoreDeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{5} +} + +type StoreListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *StoreListRequest) Reset() { + *x = StoreListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreListRequest) ProtoMessage() {} + +func (x *StoreListRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreListRequest.ProtoReflect.Descriptor instead. +func (*StoreListRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{6} +} + +func (x *StoreListRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} + +type StoreListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + List []*types.Sandbox `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (x *StoreListResponse) Reset() { + *x = StoreListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreListResponse) ProtoMessage() {} + +func (x *StoreListResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreListResponse.ProtoReflect.Descriptor instead. 
+func (*StoreListResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{7} +} + +func (x *StoreListResponse) GetList() []*types.Sandbox { + if x != nil { + return x.List + } + return nil +} + +type StoreGetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StoreGetRequest) Reset() { + *x = StoreGetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreGetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreGetRequest) ProtoMessage() {} + +func (x *StoreGetRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreGetRequest.ProtoReflect.Descriptor instead. +func (*StoreGetRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{8} +} + +func (x *StoreGetRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StoreGetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreGetResponse) Reset() { + *x = StoreGetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreGetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreGetResponse) ProtoMessage() {} + +func (x *StoreGetResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreGetResponse.ProtoReflect.Descriptor instead. 
+func (*StoreGetResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{9} +} + +func (x *StoreGetResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type ControllerCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,2,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` + NetnsPath string `protobuf:"bytes,4,opt,name=netns_path,json=netnsPath,proto3" json:"netns_path,omitempty"` +} + +func (x *ControllerCreateRequest) Reset() { + *x = ControllerCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerCreateRequest) ProtoMessage() {} + +func (x *ControllerCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerCreateRequest.ProtoReflect.Descriptor instead. +func (*ControllerCreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{10} +} + +func (x *ControllerCreateRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerCreateRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *ControllerCreateRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +func (x *ControllerCreateRequest) GetNetnsPath() string { + if x != nil { + return x.NetnsPath + } + return "" +} + +type ControllerCreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerCreateResponse) Reset() { + *x = ControllerCreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerCreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerCreateResponse) ProtoMessage() {} + +func (x *ControllerCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ControllerCreateResponse.ProtoReflect.Descriptor instead. +func (*ControllerCreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{11} +} + +func (x *ControllerCreateResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerStartRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerStartRequest) Reset() { + *x = ControllerStartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStartRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStartRequest) ProtoMessage() {} + +func (x *ControllerStartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStartRequest.ProtoReflect.Descriptor instead. +func (*ControllerStartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{12} +} + +func (x *ControllerStartRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerStartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ControllerStartResponse) Reset() { + *x = ControllerStartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStartResponse) ProtoMessage() {} + +func (x *ControllerStartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStartResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerStartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{13} +} + +func (x *ControllerStartResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ControllerStartResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *ControllerStartResponse) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type ControllerPlatformRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerPlatformRequest) Reset() { + *x = ControllerPlatformRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerPlatformRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerPlatformRequest) ProtoMessage() {} + +func (x *ControllerPlatformRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerPlatformRequest.ProtoReflect.Descriptor instead. +func (*ControllerPlatformRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{14} +} + +func (x *ControllerPlatformRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerPlatformResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` +} + +func (x *ControllerPlatformResponse) Reset() { + *x = ControllerPlatformResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerPlatformResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerPlatformResponse) ProtoMessage() {} + +func (x *ControllerPlatformResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerPlatformResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerPlatformResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{15} +} + +func (x *ControllerPlatformResponse) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +type ControllerStopRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + TimeoutSecs uint32 `protobuf:"varint,2,opt,name=timeout_secs,json=timeoutSecs,proto3" json:"timeout_secs,omitempty"` +} + +func (x *ControllerStopRequest) Reset() { + *x = ControllerStopRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStopRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStopRequest) ProtoMessage() {} + +func (x *ControllerStopRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStopRequest.ProtoReflect.Descriptor instead. +func (*ControllerStopRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{16} +} + +func (x *ControllerStopRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStopRequest) GetTimeoutSecs() uint32 { + if x != nil { + return x.TimeoutSecs + } + return 0 +} + +type ControllerStopResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ControllerStopResponse) Reset() { + *x = ControllerStopResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStopResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStopResponse) ProtoMessage() {} + +func (x *ControllerStopResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStopResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerStopResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{17} +} + +type ControllerWaitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerWaitRequest) Reset() { + *x = ControllerWaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerWaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerWaitRequest) ProtoMessage() {} + +func (x *ControllerWaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerWaitRequest.ProtoReflect.Descriptor instead. +func (*ControllerWaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{18} +} + +func (x *ControllerWaitRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerWaitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *ControllerWaitResponse) Reset() { + *x = ControllerWaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerWaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerWaitResponse) ProtoMessage() {} + +func (x *ControllerWaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerWaitResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerWaitResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{19} +} + +func (x *ControllerWaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *ControllerWaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type ControllerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` +} + +func (x *ControllerStatusRequest) Reset() { + *x = ControllerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStatusRequest) ProtoMessage() {} + +func (x *ControllerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStatusRequest.ProtoReflect.Descriptor instead. +func (*ControllerStatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{20} +} + +func (x *ControllerStatusRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStatusRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +type ControllerStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + Info map[string]string `protobuf:"bytes,4,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + Extra *anypb.Any `protobuf:"bytes,7,opt,name=extra,proto3" json:"extra,omitempty"` +} + +func (x *ControllerStatusResponse) Reset() { + *x = ControllerStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStatusResponse) ProtoMessage() {} + +func (x *ControllerStatusResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStatusResponse.ProtoReflect.Descriptor instead. +func (*ControllerStatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{21} +} + +func (x *ControllerStatusResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStatusResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ControllerStatusResponse) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *ControllerStatusResponse) GetInfo() map[string]string { + if x != nil { + return x.Info + } + return nil +} + +func (x *ControllerStatusResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *ControllerStatusResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *ControllerStatusResponse) GetExtra() *anypb.Any { + if x != nil { + return x.Extra + } + return nil +} + +type ControllerShutdownRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerShutdownRequest) Reset() { + *x = ControllerShutdownRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerShutdownRequest) ProtoMessage() {} + +func (x *ControllerShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerShutdownRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{22} +} + +func (x *ControllerShutdownRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerShutdownResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ControllerShutdownResponse) Reset() { + *x = ControllerShutdownResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerShutdownResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerShutdownResponse) ProtoMessage() {} + +func (x *ControllerShutdownResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerShutdownResponse.ProtoReflect.Descriptor instead. +func (*ControllerShutdownResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{23} +} + +var File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x49, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x4a, 0x0a, 0x13, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x61, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, + 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, + 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x4a, 0x0a, 0x13, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x33, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x22, 0x42, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x47, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x22, 0xb8, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x72, + 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x65, 0x74, 0x6e, 0x73, 0x50, 0x61, 0x74, 0x68, 0x22, 0x39, 0x0a, 0x18, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, + 0x9d, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x73, 0x2e, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x3a, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x22, 0x59, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x18, 0x0a, 0x16, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x72, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, + 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x22, 0x52, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 
0x0a, + 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x22, 0x92, 0x03, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, + 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, + 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x19, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb7, 0x04, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, + 0x71, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 
0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2f, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0xf6, 0x06, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x7b, + 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x53, 0x74, 0x6f, + 0x70, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x75, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 
0x57, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, + 0x6e, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes = []interface{}{ + (*StoreCreateRequest)(nil), // 0: containerd.services.sandbox.v1.StoreCreateRequest + (*StoreCreateResponse)(nil), // 1: containerd.services.sandbox.v1.StoreCreateResponse + (*StoreUpdateRequest)(nil), // 2: containerd.services.sandbox.v1.StoreUpdateRequest + (*StoreUpdateResponse)(nil), // 3: containerd.services.sandbox.v1.StoreUpdateResponse + (*StoreDeleteRequest)(nil), // 4: 
containerd.services.sandbox.v1.StoreDeleteRequest + (*StoreDeleteResponse)(nil), // 5: containerd.services.sandbox.v1.StoreDeleteResponse + (*StoreListRequest)(nil), // 6: containerd.services.sandbox.v1.StoreListRequest + (*StoreListResponse)(nil), // 7: containerd.services.sandbox.v1.StoreListResponse + (*StoreGetRequest)(nil), // 8: containerd.services.sandbox.v1.StoreGetRequest + (*StoreGetResponse)(nil), // 9: containerd.services.sandbox.v1.StoreGetResponse + (*ControllerCreateRequest)(nil), // 10: containerd.services.sandbox.v1.ControllerCreateRequest + (*ControllerCreateResponse)(nil), // 11: containerd.services.sandbox.v1.ControllerCreateResponse + (*ControllerStartRequest)(nil), // 12: containerd.services.sandbox.v1.ControllerStartRequest + (*ControllerStartResponse)(nil), // 13: containerd.services.sandbox.v1.ControllerStartResponse + (*ControllerPlatformRequest)(nil), // 14: containerd.services.sandbox.v1.ControllerPlatformRequest + (*ControllerPlatformResponse)(nil), // 15: containerd.services.sandbox.v1.ControllerPlatformResponse + (*ControllerStopRequest)(nil), // 16: containerd.services.sandbox.v1.ControllerStopRequest + (*ControllerStopResponse)(nil), // 17: containerd.services.sandbox.v1.ControllerStopResponse + (*ControllerWaitRequest)(nil), // 18: containerd.services.sandbox.v1.ControllerWaitRequest + (*ControllerWaitResponse)(nil), // 19: containerd.services.sandbox.v1.ControllerWaitResponse + (*ControllerStatusRequest)(nil), // 20: containerd.services.sandbox.v1.ControllerStatusRequest + (*ControllerStatusResponse)(nil), // 21: containerd.services.sandbox.v1.ControllerStatusResponse + (*ControllerShutdownRequest)(nil), // 22: containerd.services.sandbox.v1.ControllerShutdownRequest + (*ControllerShutdownResponse)(nil), // 23: containerd.services.sandbox.v1.ControllerShutdownResponse + nil, // 24: containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry + nil, // 25: containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry + (*types.Sandbox)(nil), // 26: containerd.types.Sandbox + (*types.Mount)(nil), // 27: containerd.types.Mount + (*anypb.Any)(nil), // 28: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp + (*types.Platform)(nil), // 30: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs = []int32{ + 26, // 0: containerd.services.sandbox.v1.StoreCreateRequest.sandbox:type_name -> containerd.types.Sandbox + 26, // 1: containerd.services.sandbox.v1.StoreCreateResponse.sandbox:type_name -> containerd.types.Sandbox + 26, // 2: containerd.services.sandbox.v1.StoreUpdateRequest.sandbox:type_name -> containerd.types.Sandbox + 26, // 3: containerd.services.sandbox.v1.StoreUpdateResponse.sandbox:type_name -> containerd.types.Sandbox + 26, // 4: containerd.services.sandbox.v1.StoreListResponse.list:type_name -> containerd.types.Sandbox + 26, // 5: containerd.services.sandbox.v1.StoreGetResponse.sandbox:type_name -> containerd.types.Sandbox + 27, // 6: containerd.services.sandbox.v1.ControllerCreateRequest.rootfs:type_name -> containerd.types.Mount + 28, // 7: containerd.services.sandbox.v1.ControllerCreateRequest.options:type_name -> google.protobuf.Any + 29, // 8: containerd.services.sandbox.v1.ControllerStartResponse.created_at:type_name -> google.protobuf.Timestamp + 24, // 9: containerd.services.sandbox.v1.ControllerStartResponse.labels:type_name -> containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry + 30, // 10: 
containerd.services.sandbox.v1.ControllerPlatformResponse.platform:type_name -> containerd.types.Platform + 29, // 11: containerd.services.sandbox.v1.ControllerWaitResponse.exited_at:type_name -> google.protobuf.Timestamp + 25, // 12: containerd.services.sandbox.v1.ControllerStatusResponse.info:type_name -> containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry + 29, // 13: containerd.services.sandbox.v1.ControllerStatusResponse.created_at:type_name -> google.protobuf.Timestamp + 29, // 14: containerd.services.sandbox.v1.ControllerStatusResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 15: containerd.services.sandbox.v1.ControllerStatusResponse.extra:type_name -> google.protobuf.Any + 0, // 16: containerd.services.sandbox.v1.Store.Create:input_type -> containerd.services.sandbox.v1.StoreCreateRequest + 2, // 17: containerd.services.sandbox.v1.Store.Update:input_type -> containerd.services.sandbox.v1.StoreUpdateRequest + 4, // 18: containerd.services.sandbox.v1.Store.Delete:input_type -> containerd.services.sandbox.v1.StoreDeleteRequest + 6, // 19: containerd.services.sandbox.v1.Store.List:input_type -> containerd.services.sandbox.v1.StoreListRequest + 8, // 20: containerd.services.sandbox.v1.Store.Get:input_type -> containerd.services.sandbox.v1.StoreGetRequest + 10, // 21: containerd.services.sandbox.v1.Controller.Create:input_type -> containerd.services.sandbox.v1.ControllerCreateRequest + 12, // 22: containerd.services.sandbox.v1.Controller.Start:input_type -> containerd.services.sandbox.v1.ControllerStartRequest + 14, // 23: containerd.services.sandbox.v1.Controller.Platform:input_type -> containerd.services.sandbox.v1.ControllerPlatformRequest + 16, // 24: containerd.services.sandbox.v1.Controller.Stop:input_type -> containerd.services.sandbox.v1.ControllerStopRequest + 18, // 25: containerd.services.sandbox.v1.Controller.Wait:input_type -> containerd.services.sandbox.v1.ControllerWaitRequest + 20, // 26: containerd.services.sandbox.v1.Controller.Status:input_type -> containerd.services.sandbox.v1.ControllerStatusRequest + 22, // 27: containerd.services.sandbox.v1.Controller.Shutdown:input_type -> containerd.services.sandbox.v1.ControllerShutdownRequest + 1, // 28: containerd.services.sandbox.v1.Store.Create:output_type -> containerd.services.sandbox.v1.StoreCreateResponse + 3, // 29: containerd.services.sandbox.v1.Store.Update:output_type -> containerd.services.sandbox.v1.StoreUpdateResponse + 5, // 30: containerd.services.sandbox.v1.Store.Delete:output_type -> containerd.services.sandbox.v1.StoreDeleteResponse + 7, // 31: containerd.services.sandbox.v1.Store.List:output_type -> containerd.services.sandbox.v1.StoreListResponse + 9, // 32: containerd.services.sandbox.v1.Store.Get:output_type -> containerd.services.sandbox.v1.StoreGetResponse + 11, // 33: containerd.services.sandbox.v1.Controller.Create:output_type -> containerd.services.sandbox.v1.ControllerCreateResponse + 13, // 34: containerd.services.sandbox.v1.Controller.Start:output_type -> containerd.services.sandbox.v1.ControllerStartResponse + 15, // 35: containerd.services.sandbox.v1.Controller.Platform:output_type -> containerd.services.sandbox.v1.ControllerPlatformResponse + 17, // 36: containerd.services.sandbox.v1.Controller.Stop:output_type -> containerd.services.sandbox.v1.ControllerStopResponse + 19, // 37: containerd.services.sandbox.v1.Controller.Wait:output_type -> containerd.services.sandbox.v1.ControllerWaitResponse + 21, // 38: 
containerd.services.sandbox.v1.Controller.Status:output_type -> containerd.services.sandbox.v1.ControllerStatusResponse + 23, // 39: containerd.services.sandbox.v1.Controller.Shutdown:output_type -> containerd.services.sandbox.v1.ControllerShutdownResponse + 28, // [28:40] is the sub-list for method output_type + 16, // [16:28] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreCreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreGetRequest); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreGetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerCreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPlatformRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPlatformResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStopRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStopResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerWaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerWaitResponse); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerShutdownRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerShutdownResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 26, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto = out.File + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto new file mode 100644 index 0000000000..40a2db24f6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto @@ -0,0 +1,163 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +// Sandbox is a v2 runtime extension that allows more complex execution environments for containers. +// This adds a notion of groups of containers that share same lifecycle and/or resources. 
+// A few good fits for sandbox can be: +// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace. +// - (micro)VMs that launch a VM process and executes containers inside guest OS. +// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes. +// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131 +package containerd.services.sandbox.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "github.com/containerd/containerd/api/types/sandbox.proto"; +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/platform.proto"; + +option go_package = "github.com/containerd/containerd/api/services/sandbox/v1;sandbox"; + +// Store provides a metadata storage interface for sandboxes. Similarly to `Containers`, +// sandbox object includes info required to start a new instance, but no runtime state. +// When running a new sandbox instance, store objects are used as base type to create from. +service Store { + rpc Create(StoreCreateRequest) returns (StoreCreateResponse); + rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse); + rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse); + rpc List(StoreListRequest) returns (StoreListResponse); + rpc Get(StoreGetRequest) returns (StoreGetResponse); +} + +message StoreCreateRequest { + containerd.types.Sandbox sandbox = 1; +} + +message StoreCreateResponse { + containerd.types.Sandbox sandbox = 1; +} + +message StoreUpdateRequest { + containerd.types.Sandbox sandbox = 1; + repeated string fields = 2; +} + +message StoreUpdateResponse { + containerd.types.Sandbox sandbox = 1; +} + +message StoreDeleteRequest { + string sandbox_id = 1; +} + +message StoreDeleteResponse {} + +message StoreListRequest { + repeated string filters = 1; +} + +message StoreListResponse { + repeated containerd.types.Sandbox list = 1; +} + +message StoreGetRequest { + string sandbox_id = 1; +} + +message StoreGetResponse { + containerd.types.Sandbox sandbox = 1; +} + +// Controller is an interface to manage runtime sandbox instances. 
+service Controller { + rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse); + rpc Start(ControllerStartRequest) returns (ControllerStartResponse); + rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse); + rpc Stop(ControllerStopRequest) returns (ControllerStopResponse); + rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse); + rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse); + rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse); +} + +message ControllerCreateRequest { + string sandbox_id = 1; + repeated containerd.types.Mount rootfs = 2; + google.protobuf.Any options = 3; + string netns_path = 4; +} + +message ControllerCreateResponse { + string sandbox_id = 1; +} + +message ControllerStartRequest { + string sandbox_id = 1; +} + +message ControllerStartResponse { + string sandbox_id = 1; + uint32 pid = 2; + google.protobuf.Timestamp created_at = 3; + map labels = 4; +} + +message ControllerPlatformRequest { + string sandbox_id = 1; +} + +message ControllerPlatformResponse { + containerd.types.Platform platform = 1; +} + +message ControllerStopRequest { + string sandbox_id = 1; + uint32 timeout_secs = 2; +} + +message ControllerStopResponse {} + +message ControllerWaitRequest { + string sandbox_id = 1; +} + +message ControllerWaitResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2; +} + +message ControllerStatusRequest { + string sandbox_id = 1; + bool verbose = 2; +} + +message ControllerStatusResponse { + string sandbox_id = 1; + uint32 pid = 2; + string state = 3; + map info = 4; + google.protobuf.Timestamp created_at = 5; + google.protobuf.Timestamp exited_at = 6; + google.protobuf.Any extra = 7; +} + +message ControllerShutdownRequest { + string sandbox_id = 1; +} + +message ControllerShutdownResponse {} diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go new file mode 100644 index 0000000000..16d746d75c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go @@ -0,0 +1,551 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto + +package sandbox + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// StoreClient is the client API for Store service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
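
For orientation: the proto above defines two services, Store (metadata CRUD over sandbox records) and Controller (lifecycle of running sandbox instances), and the generated Go stubs for both follow. Below is a minimal usage sketch of the Controller client; it is not part of the vendored patch, and the socket path, namespace, sandbox ID, and the SandboxID field casing are assumptions made for illustration.

package main

import (
	"context"
	"log"

	sandboxapi "github.com/containerd/containerd/api/services/sandbox/v1"
	"github.com/containerd/containerd/namespaces"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial containerd's gRPC socket directly; the path is a placeholder,
	// and most containerd services expect a namespace on the context.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	ctl := sandboxapi.NewControllerClient(conn)

	// Start a previously created sandbox, then wait for it to exit.
	if _, err := ctl.Start(ctx, &sandboxapi.ControllerStartRequest{SandboxID: "example"}); err != nil {
		log.Fatal(err)
	}
	exited, err := ctl.Wait(ctx, &sandboxapi.ControllerWaitRequest{SandboxID: "example"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("sandbox exited with status %d at %s", exited.GetExitStatus(), exited.GetExitedAt().AsTime())
}

In practice callers usually reach these services through the containerd client rather than a raw gRPC connection; the sketch only exercises the generated surface shown in this file.
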
+type StoreClient interface { + Create(ctx context.Context, in *StoreCreateRequest, opts ...grpc.CallOption) (*StoreCreateResponse, error) + Update(ctx context.Context, in *StoreUpdateRequest, opts ...grpc.CallOption) (*StoreUpdateResponse, error) + Delete(ctx context.Context, in *StoreDeleteRequest, opts ...grpc.CallOption) (*StoreDeleteResponse, error) + List(ctx context.Context, in *StoreListRequest, opts ...grpc.CallOption) (*StoreListResponse, error) + Get(ctx context.Context, in *StoreGetRequest, opts ...grpc.CallOption) (*StoreGetResponse, error) +} + +type storeClient struct { + cc grpc.ClientConnInterface +} + +func NewStoreClient(cc grpc.ClientConnInterface) StoreClient { + return &storeClient{cc} +} + +func (c *storeClient) Create(ctx context.Context, in *StoreCreateRequest, opts ...grpc.CallOption) (*StoreCreateResponse, error) { + out := new(StoreCreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Update(ctx context.Context, in *StoreUpdateRequest, opts ...grpc.CallOption) (*StoreUpdateResponse, error) { + out := new(StoreUpdateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Delete(ctx context.Context, in *StoreDeleteRequest, opts ...grpc.CallOption) (*StoreDeleteResponse, error) { + out := new(StoreDeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) List(ctx context.Context, in *StoreListRequest, opts ...grpc.CallOption) (*StoreListResponse, error) { + out := new(StoreListResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Get(ctx context.Context, in *StoreGetRequest, opts ...grpc.CallOption) (*StoreGetResponse, error) { + out := new(StoreGetResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StoreServer is the server API for Store service. +// All implementations must embed UnimplementedStoreServer +// for forward compatibility +type StoreServer interface { + Create(context.Context, *StoreCreateRequest) (*StoreCreateResponse, error) + Update(context.Context, *StoreUpdateRequest) (*StoreUpdateResponse, error) + Delete(context.Context, *StoreDeleteRequest) (*StoreDeleteResponse, error) + List(context.Context, *StoreListRequest) (*StoreListResponse, error) + Get(context.Context, *StoreGetRequest) (*StoreGetResponse, error) + mustEmbedUnimplementedStoreServer() +} + +// UnimplementedStoreServer must be embedded to have forward compatible implementations. 
+type UnimplementedStoreServer struct { +} + +func (UnimplementedStoreServer) Create(context.Context, *StoreCreateRequest) (*StoreCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedStoreServer) Update(context.Context, *StoreUpdateRequest) (*StoreUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedStoreServer) Delete(context.Context, *StoreDeleteRequest) (*StoreDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedStoreServer) List(context.Context, *StoreListRequest) (*StoreListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedStoreServer) Get(context.Context, *StoreGetRequest) (*StoreGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedStoreServer) mustEmbedUnimplementedStoreServer() {} + +// UnsafeStoreServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StoreServer will +// result in compilation errors. +type UnsafeStoreServer interface { + mustEmbedUnimplementedStoreServer() +} + +func RegisterStoreServer(s grpc.ServiceRegistrar, srv StoreServer) { + s.RegisterService(&Store_ServiceDesc, srv) +} + +func _Store_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Create(ctx, req.(*StoreCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Update(ctx, req.(*StoreUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Delete(ctx, req.(*StoreDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(StoreListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).List(ctx, req.(*StoreListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Get(ctx, req.(*StoreGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Store_ServiceDesc is the grpc.ServiceDesc for Store service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Store_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.sandbox.v1.Store", + HandlerType: (*StoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Store_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Store_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Store_Delete_Handler, + }, + { + MethodName: "List", + Handler: _Store_List_Handler, + }, + { + MethodName: "Get", + Handler: _Store_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto", +} + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ControllerClient interface { + Create(ctx context.Context, in *ControllerCreateRequest, opts ...grpc.CallOption) (*ControllerCreateResponse, error) + Start(ctx context.Context, in *ControllerStartRequest, opts ...grpc.CallOption) (*ControllerStartResponse, error) + Platform(ctx context.Context, in *ControllerPlatformRequest, opts ...grpc.CallOption) (*ControllerPlatformResponse, error) + Stop(ctx context.Context, in *ControllerStopRequest, opts ...grpc.CallOption) (*ControllerStopResponse, error) + Wait(ctx context.Context, in *ControllerWaitRequest, opts ...grpc.CallOption) (*ControllerWaitResponse, error) + Status(ctx context.Context, in *ControllerStatusRequest, opts ...grpc.CallOption) (*ControllerStatusResponse, error) + Shutdown(ctx context.Context, in *ControllerShutdownRequest, opts ...grpc.CallOption) (*ControllerShutdownResponse, error) +} + +type controllerClient struct { + cc grpc.ClientConnInterface +} + +func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) Create(ctx context.Context, in *ControllerCreateRequest, opts ...grpc.CallOption) (*ControllerCreateResponse, error) { + out := new(ControllerCreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Create", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Start(ctx context.Context, in *ControllerStartRequest, opts ...grpc.CallOption) (*ControllerStartResponse, error) { + out := new(ControllerStartResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Platform(ctx context.Context, in *ControllerPlatformRequest, opts ...grpc.CallOption) (*ControllerPlatformResponse, error) { + out := new(ControllerPlatformResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Platform", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Stop(ctx context.Context, in *ControllerStopRequest, opts ...grpc.CallOption) (*ControllerStopResponse, error) { + out := new(ControllerStopResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Wait(ctx context.Context, in *ControllerWaitRequest, opts ...grpc.CallOption) (*ControllerWaitResponse, error) { + out := new(ControllerWaitResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Wait", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Status(ctx context.Context, in *ControllerStatusRequest, opts ...grpc.CallOption) (*ControllerStatusResponse, error) { + out := new(ControllerStatusResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Shutdown(ctx context.Context, in *ControllerShutdownRequest, opts ...grpc.CallOption) (*ControllerShutdownResponse, error) { + out := new(ControllerShutdownResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. +// All implementations must embed UnimplementedControllerServer +// for forward compatibility +type ControllerServer interface { + Create(context.Context, *ControllerCreateRequest) (*ControllerCreateResponse, error) + Start(context.Context, *ControllerStartRequest) (*ControllerStartResponse, error) + Platform(context.Context, *ControllerPlatformRequest) (*ControllerPlatformResponse, error) + Stop(context.Context, *ControllerStopRequest) (*ControllerStopResponse, error) + Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error) + Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error) + Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error) + mustEmbedUnimplementedControllerServer() +} + +// UnimplementedControllerServer must be embedded to have forward compatible implementations. 
+type UnimplementedControllerServer struct { +} + +func (UnimplementedControllerServer) Create(context.Context, *ControllerCreateRequest) (*ControllerCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedControllerServer) Start(context.Context, *ControllerStartRequest) (*ControllerStartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (UnimplementedControllerServer) Platform(context.Context, *ControllerPlatformRequest) (*ControllerPlatformResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Platform not implemented") +} +func (UnimplementedControllerServer) Stop(context.Context, *ControllerStopRequest) (*ControllerStopResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedControllerServer) Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") +} +func (UnimplementedControllerServer) Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedControllerServer) Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} +func (UnimplementedControllerServer) mustEmbedUnimplementedControllerServer() {} + +// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ControllerServer will +// result in compilation errors. 
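
The forward-compatibility contract spelled out above (embed UnimplementedControllerServer so that RPCs added to the service later fail with codes.Unimplemented instead of breaking the build, and avoid UnsafeControllerServer) looks like this in practice. A minimal sketch, assuming the generated package is imported as sandboxapi; the demoController type is purely illustrative, and RegisterControllerServer is the helper defined just below.

package main

import (
	"context"

	sandboxapi "github.com/containerd/containerd/api/services/sandbox/v1"
	"google.golang.org/grpc"
)

// demoController overrides only Start; every other Controller RPC falls
// back to the embedded stubs, which answer with codes.Unimplemented.
type demoController struct {
	sandboxapi.UnimplementedControllerServer
}

func (d *demoController) Start(ctx context.Context, req *sandboxapi.ControllerStartRequest) (*sandboxapi.ControllerStartResponse, error) {
	// A real controller would launch the sandbox here; this stub only
	// echoes the ID back (SandboxID casing assumed from the generated code).
	return &sandboxapi.ControllerStartResponse{SandboxID: req.SandboxID}, nil
}

func main() {
	srv := grpc.NewServer()
	sandboxapi.RegisterControllerServer(srv, &demoController{})
	_ = srv // net.Listen and srv.Serve omitted to keep the sketch short
}
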
+type UnsafeControllerServer interface { + mustEmbedUnimplementedControllerServer() +} + +func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) { + s.RegisterService(&Controller_ServiceDesc, srv) +} + +func _Controller_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Create(ctx, req.(*ControllerCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Start(ctx, req.(*ControllerStartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Platform_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPlatformRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Platform(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Platform", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Platform(ctx, req.(*ControllerPlatformRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStopRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Stop(ctx, req.(*ControllerStopRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerWaitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Wait(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Wait", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Wait(ctx, req.(*ControllerWaitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Controller_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Status(ctx, req.(*ControllerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Shutdown(ctx, req.(*ControllerShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Controller_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.sandbox.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Controller_Create_Handler, + }, + { + MethodName: "Start", + Handler: _Controller_Start_Handler, + }, + { + MethodName: "Platform", + Handler: _Controller_Platform_Handler, + }, + { + MethodName: "Stop", + Handler: _Controller_Stop_Handler, + }, + { + MethodName: "Wait", + Handler: _Controller_Wait_Handler, + }, + { + MethodName: "Status", + Handler: _Controller_Status_Handler, + }, + { + MethodName: "Shutdown", + Handler: _Controller_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go index e8c66644f8..b7cec8048b 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go @@ -1,617 +1,884 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto package snapshots import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Kind int32 const ( - KindUnknown Kind = 0 - KindView Kind = 1 - KindActive Kind = 2 - KindCommitted Kind = 3 + Kind_UNKNOWN Kind = 0 + Kind_VIEW Kind = 1 + Kind_ACTIVE Kind = 2 + Kind_COMMITTED Kind = 3 ) -var Kind_name = map[int32]string{ - 0: "UNKNOWN", - 1: "VIEW", - 2: "ACTIVE", - 3: "COMMITTED", -} +// Enum value maps for Kind. +var ( + Kind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VIEW", + 2: "ACTIVE", + 3: "COMMITTED", + } + Kind_value = map[string]int32{ + "UNKNOWN": 0, + "VIEW": 1, + "ACTIVE": 2, + "COMMITTED": 3, + } +) -var Kind_value = map[string]int32{ - "UNKNOWN": 0, - "VIEW": 1, - "ACTIVE": 2, - "COMMITTED": 3, +func (x Kind) Enum() *Kind { + p := new(Kind) + *p = x + return p } func (x Kind) String() string { - return proto.EnumName(Kind_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (Kind) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes[0].Descriptor() +} + +func (Kind) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes[0] +} + +func (x Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Kind.Descriptor instead. 
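
The hunk above shows the caller-visible effect of regenerating this file with protoc-gen-go instead of protoc-gen-gogo: the Kind constants change from the Go-style names (KindUnknown, KindView, KindActive, KindCommitted) to the standard Kind_UNKNOWN, Kind_VIEW, Kind_ACTIVE and Kind_COMMITTED, and messages carry protoimpl state instead of the old XXX_ fields. A minimal sketch of how the rename lands on code that uses the generated API package directly; the snapshotkind package name and snapshotsapi import alias are chosen for illustration.

package snapshotkind

import snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"

// isActive illustrates the renamed constant: gogo-generated code compared
// against snapshotsapi.KindActive, while the regenerated package exposes
// snapshotsapi.Kind_ACTIVE for the same wire value.
func isActive(k snapshotsapi.Kind) bool {
	return k == snapshotsapi.Kind_ACTIVE
}
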
func (Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{0} + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{0} } type PrepareSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *PrepareSnapshotRequest) Reset() { *m = PrepareSnapshotRequest{} } -func (*PrepareSnapshotRequest) ProtoMessage() {} -func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{0} -} -func (m *PrepareSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrepareSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrepareSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PrepareSnapshotRequest) Reset() { + *x = PrepareSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PrepareSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareSnapshotRequest.Merge(m, src) -} -func (m *PrepareSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *PrepareSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareSnapshotRequest.DiscardUnknown(m) + +func (x *PrepareSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PrepareSnapshotRequest proto.InternalMessageInfo +func (*PrepareSnapshotRequest) ProtoMessage() {} + +func (x *PrepareSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{0} +} + +func (x *PrepareSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *PrepareSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PrepareSnapshotRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *PrepareSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type PrepareSnapshotResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` } -func (m *PrepareSnapshotResponse) Reset() { *m = PrepareSnapshotResponse{} } -func (*PrepareSnapshotResponse) ProtoMessage() {} -func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{1} -} -func (m *PrepareSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrepareSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrepareSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PrepareSnapshotResponse) Reset() { + *x = PrepareSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PrepareSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareSnapshotResponse.Merge(m, src) -} -func (m *PrepareSnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *PrepareSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareSnapshotResponse.DiscardUnknown(m) + +func (x *PrepareSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PrepareSnapshotResponse proto.InternalMessageInfo +func (*PrepareSnapshotResponse) ProtoMessage() {} + +func (x *PrepareSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{1} +} + +func (x *PrepareSnapshotResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil +} type ViewSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ViewSnapshotRequest) Reset() { *m = ViewSnapshotRequest{} } -func (*ViewSnapshotRequest) ProtoMessage() {} -func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{2} -} -func (m *ViewSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ViewSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ViewSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ViewSnapshotRequest) Reset() { + *x = ViewSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ViewSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ViewSnapshotRequest.Merge(m, src) -} -func (m *ViewSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *ViewSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ViewSnapshotRequest.DiscardUnknown(m) + +func (x *ViewSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ViewSnapshotRequest proto.InternalMessageInfo +func (*ViewSnapshotRequest) ProtoMessage() {} + +func (x *ViewSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ViewSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{2} +} + +func (x *ViewSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *ViewSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ViewSnapshotRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ViewSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type ViewSnapshotResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` } -func (m *ViewSnapshotResponse) Reset() { *m = ViewSnapshotResponse{} } -func (*ViewSnapshotResponse) ProtoMessage() {} -func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{3} -} -func (m *ViewSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ViewSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ViewSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ViewSnapshotResponse) Reset() { + *x = ViewSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ViewSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ViewSnapshotResponse.Merge(m, src) -} -func (m *ViewSnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *ViewSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ViewSnapshotResponse.DiscardUnknown(m) + +func (x *ViewSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ViewSnapshotResponse proto.InternalMessageInfo +func (*ViewSnapshotResponse) ProtoMessage() {} + +func (x *ViewSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ViewSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{3} +} + +func (x *ViewSnapshotResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil +} type MountsRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *MountsRequest) Reset() { *m = MountsRequest{} } -func (*MountsRequest) ProtoMessage() {} -func (*MountsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{4} -} -func (m *MountsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MountsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *MountsRequest) Reset() { + *x = MountsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *MountsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MountsRequest.Merge(m, src) -} -func (m *MountsRequest) XXX_Size() int { - return m.Size() -} -func (m *MountsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MountsRequest.DiscardUnknown(m) + +func (x *MountsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_MountsRequest proto.InternalMessageInfo +func (*MountsRequest) ProtoMessage() {} + +func (x *MountsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MountsRequest.ProtoReflect.Descriptor instead. 
+func (*MountsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{4} +} + +func (x *MountsRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *MountsRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type MountsResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` } -func (m *MountsResponse) Reset() { *m = MountsResponse{} } -func (*MountsResponse) ProtoMessage() {} -func (*MountsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{5} -} -func (m *MountsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MountsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *MountsResponse) Reset() { + *x = MountsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *MountsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MountsResponse.Merge(m, src) -} -func (m *MountsResponse) XXX_Size() int { - return m.Size() -} -func (m *MountsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MountsResponse.DiscardUnknown(m) + +func (x *MountsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_MountsResponse proto.InternalMessageInfo +func (*MountsResponse) ProtoMessage() {} + +func (x *MountsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MountsResponse.ProtoReflect.Descriptor instead. 
+func (*MountsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{5} +} + +func (x *MountsResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil +} type RemoveSnapshotRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *RemoveSnapshotRequest) Reset() { *m = RemoveSnapshotRequest{} } -func (*RemoveSnapshotRequest) ProtoMessage() {} -func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{6} -} -func (m *RemoveSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemoveSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RemoveSnapshotRequest) Reset() { + *x = RemoveSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *RemoveSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveSnapshotRequest.Merge(m, src) -} -func (m *RemoveSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *RemoveSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveSnapshotRequest.DiscardUnknown(m) + +func (x *RemoveSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_RemoveSnapshotRequest proto.InternalMessageInfo +func (*RemoveSnapshotRequest) ProtoMessage() {} + +func (x *RemoveSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{6} +} + +func (x *RemoveSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *RemoveSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type CommitSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *CommitSnapshotRequest) Reset() { *m = CommitSnapshotRequest{} } -func (*CommitSnapshotRequest) ProtoMessage() {} -func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{7} -} -func (m *CommitSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CommitSnapshotRequest) Reset() { + *x = CommitSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CommitSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitSnapshotRequest.Merge(m, src) -} -func (m *CommitSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *CommitSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CommitSnapshotRequest.DiscardUnknown(m) + +func (x *CommitSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CommitSnapshotRequest proto.InternalMessageInfo +func (*CommitSnapshotRequest) ProtoMessage() {} + +func (x *CommitSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommitSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{7} +} + +func (x *CommitSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *CommitSnapshotRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CommitSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CommitSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type StatSnapshotRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *StatSnapshotRequest) Reset() { *m = StatSnapshotRequest{} } -func (*StatSnapshotRequest) ProtoMessage() {} -func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{8} -} -func (m *StatSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StatSnapshotRequest) Reset() { + *x = StatSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StatSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatSnapshotRequest.Merge(m, src) -} -func (m *StatSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *StatSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatSnapshotRequest.DiscardUnknown(m) + +func (x *StatSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StatSnapshotRequest proto.InternalMessageInfo +func (*StatSnapshotRequest) ProtoMessage() {} + +func (x *StatSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{8} +} + +func (x *StatSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *StatSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` Kind Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"` // CreatedAt provides the time at which the snapshot was created. - CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt provides the time the info was last updated. - UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Info) Reset() { *m = Info{} } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{9} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return m.Size() -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Info proto.InternalMessageInfo +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{9} +} + +func (x *Info) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Info) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *Info) GetKind() Kind { + if x != nil { + return x.Kind + } + return Kind_UNKNOWN +} + +func (x *Info) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Info) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Info) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type StatSnapshotResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *StatSnapshotResponse) Reset() { *m = StatSnapshotResponse{} } -func (*StatSnapshotResponse) ProtoMessage() {} -func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{10} -} -func (m *StatSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StatSnapshotResponse) Reset() { + *x = StatSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StatSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatSnapshotResponse.Merge(m, src) -} -func (m *StatSnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *StatSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatSnapshotResponse.DiscardUnknown(m) + +func (x *StatSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StatSnapshotResponse proto.InternalMessageInfo +func (*StatSnapshotResponse) ProtoMessage() {} + +func (x *StatSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{10} +} + +func (x *StatSnapshotResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type UpdateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Info Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Name, Parent, Kind, Created are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. - UpdateMask *types1.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *UpdateSnapshotRequest) Reset() { *m = UpdateSnapshotRequest{} } -func (*UpdateSnapshotRequest) ProtoMessage() {} -func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{11} -} -func (m *UpdateSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateSnapshotRequest) Reset() { + *x = UpdateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateSnapshotRequest.Merge(m, src) -} -func (m *UpdateSnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateSnapshotRequest.DiscardUnknown(m) + +func (x *UpdateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateSnapshotRequest proto.InternalMessageInfo +func (*UpdateSnapshotRequest) ProtoMessage() {} + +func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{11} +} + +func (x *UpdateSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *UpdateSnapshotRequest) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *UpdateSnapshotRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateSnapshotResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *UpdateSnapshotResponse) Reset() { *m = UpdateSnapshotResponse{} } -func (*UpdateSnapshotResponse) ProtoMessage() {} -func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{12} -} -func (m *UpdateSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateSnapshotResponse) Reset() { + *x = UpdateSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateSnapshotResponse.Merge(m, src) -} -func (m *UpdateSnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateSnapshotResponse.DiscardUnknown(m) + +func (x *UpdateSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateSnapshotResponse proto.InternalMessageInfo +func (*UpdateSnapshotResponse) ProtoMessage() {} + +func (x *UpdateSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnapshotResponse.ProtoReflect.Descriptor instead. +func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{12} +} + +func (x *UpdateSnapshotResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type ListSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` // Filters contains one or more filters using the syntax defined in the // containerd filter package. 
@@ -623,4919 +890,826 @@ type ListSnapshotsRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } -func (*ListSnapshotsRequest) ProtoMessage() {} -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{13} -} -func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListSnapshotsRequest) Reset() { + *x = ListSnapshotsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) -} -func (m *ListSnapshotsRequest) XXX_Size() int { - return m.Size() -} -func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) + +func (x *ListSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo +func (*ListSnapshotsRequest) ProtoMessage() {} + +func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. 
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{13} +} + +func (x *ListSnapshotsRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *ListSnapshotsRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type ListSnapshotsResponse struct { - Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } -func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } -func (*ListSnapshotsResponse) ProtoMessage() {} -func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{14} -} -func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListSnapshotsResponse) Reset() { + *x = ListSnapshotsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) -} -func (m *ListSnapshotsResponse) XXX_Size() int { - return m.Size() -} -func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) + +func (x *ListSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo +func (*ListSnapshotsResponse) ProtoMessage() {} + +func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. 
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{14} +} + +func (x *ListSnapshotsResponse) GetInfo() []*Info { + if x != nil { + return x.Info + } + return nil +} type UsageRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *UsageRequest) Reset() { *m = UsageRequest{} } -func (*UsageRequest) ProtoMessage() {} -func (*UsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{15} -} -func (m *UsageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UsageRequest) Reset() { + *x = UsageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsageRequest.Merge(m, src) -} -func (m *UsageRequest) XXX_Size() int { - return m.Size() -} -func (m *UsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UsageRequest.DiscardUnknown(m) + +func (x *UsageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UsageRequest proto.InternalMessageInfo +func (*UsageRequest) ProtoMessage() {} + +func (x *UsageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsageRequest.ProtoReflect.Descriptor instead. 
+func (*UsageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{15} +} + +func (x *UsageRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +func (x *UsageRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type UsageResponse struct { - Size_ int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` - Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"` } -func (m *UsageResponse) Reset() { *m = UsageResponse{} } -func (*UsageResponse) ProtoMessage() {} -func (*UsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{16} -} -func (m *UsageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UsageResponse) Reset() { + *x = UsageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsageResponse.Merge(m, src) -} -func (m *UsageResponse) XXX_Size() int { - return m.Size() -} -func (m *UsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UsageResponse.DiscardUnknown(m) + +func (x *UsageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UsageResponse proto.InternalMessageInfo +func (*UsageResponse) ProtoMessage() {} + +func (x *UsageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsageResponse.ProtoReflect.Descriptor instead. 
+func (*UsageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{16} +} + +func (x *UsageResponse) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *UsageResponse) GetInodes() int64 { + if x != nil { + return x.Inodes + } + return 0 +} type CleanupRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` +} + +func (x *CleanupRequest) Reset() { + *x = CleanupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CleanupRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CleanupRequest) Reset() { *m = CleanupRequest{} } func (*CleanupRequest) ProtoMessage() {} + +func (x *CleanupRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CleanupRequest.ProtoReflect.Descriptor instead. func (*CleanupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{17} -} -func (m *CleanupRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CleanupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CleanupRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CleanupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CleanupRequest.Merge(m, src) -} -func (m *CleanupRequest) XXX_Size() int { - return m.Size() -} -func (m *CleanupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CleanupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CleanupRequest proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.services.snapshots.v1.Kind", Kind_name, Kind_value) - proto.RegisterType((*PrepareSnapshotRequest)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry") - proto.RegisterType((*PrepareSnapshotResponse)(nil), "containerd.services.snapshots.v1.PrepareSnapshotResponse") - proto.RegisterType((*ViewSnapshotRequest)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry") - proto.RegisterType((*ViewSnapshotResponse)(nil), "containerd.services.snapshots.v1.ViewSnapshotResponse") - proto.RegisterType((*MountsRequest)(nil), "containerd.services.snapshots.v1.MountsRequest") - proto.RegisterType((*MountsResponse)(nil), "containerd.services.snapshots.v1.MountsResponse") - 
proto.RegisterType((*RemoveSnapshotRequest)(nil), "containerd.services.snapshots.v1.RemoveSnapshotRequest") - proto.RegisterType((*CommitSnapshotRequest)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry") - proto.RegisterType((*StatSnapshotRequest)(nil), "containerd.services.snapshots.v1.StatSnapshotRequest") - proto.RegisterType((*Info)(nil), "containerd.services.snapshots.v1.Info") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.Info.LabelsEntry") - proto.RegisterType((*StatSnapshotResponse)(nil), "containerd.services.snapshots.v1.StatSnapshotResponse") - proto.RegisterType((*UpdateSnapshotRequest)(nil), "containerd.services.snapshots.v1.UpdateSnapshotRequest") - proto.RegisterType((*UpdateSnapshotResponse)(nil), "containerd.services.snapshots.v1.UpdateSnapshotResponse") - proto.RegisterType((*ListSnapshotsRequest)(nil), "containerd.services.snapshots.v1.ListSnapshotsRequest") - proto.RegisterType((*ListSnapshotsResponse)(nil), "containerd.services.snapshots.v1.ListSnapshotsResponse") - proto.RegisterType((*UsageRequest)(nil), "containerd.services.snapshots.v1.UsageRequest") - proto.RegisterType((*UsageResponse)(nil), "containerd.services.snapshots.v1.UsageResponse") - proto.RegisterType((*CleanupRequest)(nil), "containerd.services.snapshots.v1.CleanupRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", fileDescriptor_cfc0ddf12791f168) -} - -var fileDescriptor_cfc0ddf12791f168 = []byte{ - // 1047 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xda, 0x5b, 0x27, 0x79, 0x4e, 0x82, 0x99, 0x3a, 0xae, 0xb5, 0x20, 0x67, 0xe5, 0x03, - 0x8a, 0x38, 0xec, 0xb6, 0x46, 0xb4, 0x69, 0x73, 0xc1, 0x71, 0x0c, 0x72, 0xd2, 0xa4, 0x68, 0xf3, - 0xa7, 0x4d, 0x41, 0x54, 0x1b, 0x7b, 0xec, 0xac, 0xec, 0xfd, 0x83, 0x67, 0xec, 0xca, 0x20, 0x21, - 0x8e, 0x55, 0x4e, 0x7c, 0x81, 0x9c, 0xe0, 0x43, 0x20, 0x3e, 0x41, 0x8e, 0x48, 0x5c, 0x38, 0x01, - 0xcd, 0x97, 0xe0, 0x84, 0x40, 0x33, 0x3b, 0xeb, 0x7f, 0x71, 0xe5, 0xb5, 0x6b, 0x6e, 0x33, 0x3b, - 0xf3, 0xde, 0xfb, 0xbd, 0xdf, 0x9b, 0xf7, 0x9b, 0x59, 0xd8, 0xad, 0x5b, 0xf4, 0xbc, 0x7d, 0xa6, - 0x55, 0x5c, 0x5b, 0xaf, 0xb8, 0x0e, 0x35, 0x2d, 0x07, 0xb7, 0xaa, 0x83, 0x43, 0xd3, 0xb3, 0x74, - 0x82, 0x5b, 0x1d, 0xab, 0x82, 0x89, 0x4e, 0x1c, 0xd3, 0x23, 0xe7, 0x2e, 0x25, 0x7a, 0xe7, 0x5e, - 0x7f, 0xa2, 0x79, 0x2d, 0x97, 0xba, 0x48, 0xed, 0x5b, 0x69, 0x81, 0x85, 0xd6, 0xdf, 0xd4, 0xb9, - 0xa7, 0xa4, 0xea, 0x6e, 0xdd, 0xe5, 0x9b, 0x75, 0x36, 0xf2, 0xed, 0x94, 0xf7, 0xea, 0xae, 0x5b, - 0x6f, 0x62, 0x9d, 0xcf, 0xce, 0xda, 0x35, 0x1d, 0xdb, 0x1e, 0xed, 0x8a, 0x45, 0x75, 0x74, 0xb1, - 0x66, 0xe1, 0x66, 0xf5, 0x85, 0x6d, 0x92, 0x86, 0xd8, 0xb1, 0x3e, 0xba, 0x83, 0x5a, 0x36, 0x26, - 0xd4, 0xb4, 0x3d, 0xb1, 0xe1, 0x7e, 0xa8, 0x1c, 0x69, 0xd7, 0xc3, 0x44, 0xb7, 0xdd, 0xb6, 0x43, - 0x7d, 0xbb, 0xdc, 0x3f, 0x12, 0xa4, 0x3f, 0x6f, 0x61, 0xcf, 0x6c, 0xe1, 0x43, 0x91, 0x85, 0x81, - 0xbf, 0x6e, 0x63, 0x42, 0x91, 0x0a, 0x89, 0x20, 0x31, 0x8a, 0x5b, 0x19, 0x49, 0x95, 0x36, 0x96, - 0x8c, 0xc1, 0x4f, 0x28, 0x09, 0xb1, 0x06, 0xee, 0x66, 0xa2, 0x7c, 0x85, 0x0d, 0x51, 0x1a, 0xe2, - 0xcc, 0x95, 0x43, 0x33, 0x31, 0xfe, 0x51, 0xcc, 0xd0, 0x97, 0x10, 0x6f, 0x9a, 0x67, 0xb8, 0x49, - 0x32, 0xb2, 0x1a, 0xdb, 0x48, 0xe4, 0x77, 0xb4, 0x49, 0x3c, 0x6a, 0xe3, 0x51, 
0x69, 0x8f, 0xb9, - 0x9b, 0x92, 0x43, 0x5b, 0x5d, 0x43, 0xf8, 0x54, 0x1e, 0x42, 0x62, 0xe0, 0x73, 0x00, 0x4b, 0xea, - 0xc3, 0x4a, 0xc1, 0xad, 0x8e, 0xd9, 0x6c, 0x63, 0x01, 0xd5, 0x9f, 0x3c, 0x8a, 0x6e, 0x4a, 0xb9, - 0x5d, 0xb8, 0x73, 0x23, 0x10, 0xf1, 0x5c, 0x87, 0x60, 0xa4, 0x43, 0x9c, 0x33, 0x45, 0x32, 0x12, - 0xc7, 0x7c, 0x67, 0x10, 0x33, 0x67, 0x52, 0xdb, 0x67, 0xeb, 0x86, 0xd8, 0x96, 0xfb, 0x5b, 0x82, - 0xdb, 0x27, 0x16, 0x7e, 0xf9, 0x7f, 0x12, 0x79, 0x3a, 0x42, 0x64, 0x61, 0x32, 0x91, 0x63, 0x20, - 0xcd, 0x9b, 0xc5, 0xcf, 0x20, 0x35, 0x1c, 0x65, 0x56, 0x0a, 0x8b, 0xb0, 0xc2, 0x3f, 0x90, 0xb7, - 0xe0, 0x2e, 0x57, 0x80, 0xd5, 0xc0, 0xc9, 0xac, 0x38, 0xf6, 0x60, 0xcd, 0xc0, 0xb6, 0xdb, 0x99, - 0x47, 0x53, 0xb0, 0x73, 0xb1, 0x56, 0x74, 0x6d, 0xdb, 0xa2, 0xd3, 0x7b, 0x43, 0x20, 0x3b, 0xa6, - 0x1d, 0x50, 0xce, 0xc7, 0x41, 0x84, 0x58, 0xbf, 0x32, 0x5f, 0x8c, 0x9c, 0x8a, 0xe2, 0xe4, 0x53, - 0x31, 0x16, 0xd0, 0xbc, 0xcf, 0x45, 0x19, 0x6e, 0x1f, 0x52, 0x93, 0xce, 0x83, 0xc4, 0x7f, 0xa3, - 0x20, 0x97, 0x9d, 0x9a, 0xdb, 0x63, 0x44, 0x1a, 0x60, 0xa4, 0xdf, 0x2d, 0xd1, 0xa1, 0x6e, 0x79, - 0x04, 0x72, 0xc3, 0x72, 0xaa, 0x9c, 0xaa, 0xd5, 0xfc, 0x07, 0x93, 0x59, 0xd9, 0xb3, 0x9c, 0xaa, - 0xc1, 0x6d, 0x50, 0x11, 0xa0, 0xd2, 0xc2, 0x26, 0xc5, 0xd5, 0x17, 0x26, 0xcd, 0xc8, 0xaa, 0xb4, - 0x91, 0xc8, 0x2b, 0x9a, 0xaf, 0xc3, 0x5a, 0xa0, 0xc3, 0xda, 0x51, 0xa0, 0xc3, 0xdb, 0x8b, 0x57, - 0x7f, 0xac, 0x47, 0x7e, 0xf8, 0x73, 0x5d, 0x32, 0x96, 0x84, 0x5d, 0x81, 0x32, 0x27, 0x6d, 0xaf, - 0x1a, 0x38, 0xb9, 0x35, 0x8d, 0x13, 0x61, 0x57, 0xa0, 0x68, 0xb7, 0x57, 0xdd, 0x38, 0xaf, 0x6e, - 0x7e, 0x72, 0x1e, 0x8c, 0xa9, 0x79, 0x17, 0xf3, 0x19, 0xa4, 0x86, 0x8b, 0x29, 0x9a, 0xeb, 0x13, - 0x90, 0x2d, 0xa7, 0xe6, 0x72, 0x27, 0x89, 0x30, 0x24, 0x33, 0x70, 0xdb, 0x32, 0xcb, 0xd4, 0xe0, - 0x96, 0xb9, 0x9f, 0x25, 0x58, 0x3b, 0xe6, 0xe9, 0x4e, 0x7f, 0x52, 0x82, 0xe8, 0xd1, 0x59, 0xa3, - 0xa3, 0x2d, 0x48, 0xf8, 0x5c, 0xf3, 0x0b, 0x97, 0x9f, 0x95, 0x71, 0x45, 0xfa, 0x94, 0xdd, 0xc9, - 0xfb, 0x26, 0x69, 0x18, 0xa2, 0xa4, 0x6c, 0x9c, 0x7b, 0x0e, 0xe9, 0x51, 0xe4, 0x73, 0xa3, 0xc5, - 0x80, 0xd4, 0x63, 0x8b, 0xf4, 0x08, 0x9f, 0x42, 0x13, 0x33, 0xb0, 0x50, 0xb3, 0x9a, 0x14, 0xb7, - 0x48, 0x26, 0xaa, 0xc6, 0x36, 0x96, 0x8c, 0x60, 0x9a, 0x3b, 0x85, 0xb5, 0x11, 0x9f, 0x37, 0xe0, - 0xc6, 0x66, 0x84, 0xbb, 0x0d, 0xcb, 0xc7, 0xc4, 0xac, 0xe3, 0xb7, 0xe9, 0xf2, 0x2d, 0x58, 0x11, - 0x3e, 0x04, 0x2c, 0x04, 0x32, 0xb1, 0xbe, 0xf1, 0xbb, 0x3d, 0x66, 0xf0, 0x31, 0xeb, 0x76, 0xcb, - 0x71, 0xab, 0x98, 0x70, 0xcb, 0x98, 0x21, 0x66, 0xb9, 0x3c, 0xac, 0x16, 0x9b, 0xd8, 0x74, 0xda, - 0x5e, 0x68, 0x08, 0x1f, 0xbe, 0x92, 0x40, 0x66, 0x4d, 0x8f, 0xde, 0x87, 0x85, 0xe3, 0x83, 0xbd, - 0x83, 0x27, 0x4f, 0x0f, 0x92, 0x11, 0xe5, 0x9d, 0x8b, 0x4b, 0x35, 0xc1, 0x3e, 0x1f, 0x3b, 0x0d, - 0xc7, 0x7d, 0xe9, 0xa0, 0x34, 0xc8, 0x27, 0xe5, 0xd2, 0xd3, 0xa4, 0xa4, 0x2c, 0x5f, 0x5c, 0xaa, - 0x8b, 0x6c, 0x89, 0x5d, 0x78, 0x48, 0x81, 0x78, 0xa1, 0x78, 0x54, 0x3e, 0x29, 0x25, 0xa3, 0xca, - 0xea, 0xc5, 0xa5, 0x0a, 0x6c, 0xa5, 0x50, 0xa1, 0x56, 0x07, 0x23, 0x15, 0x96, 0x8a, 0x4f, 0xf6, - 0xf7, 0xcb, 0x47, 0x47, 0xa5, 0x9d, 0x64, 0x4c, 0x79, 0xf7, 0xe2, 0x52, 0x5d, 0x61, 0xcb, 0xbe, - 0xf2, 0x52, 0x5c, 0x55, 0x96, 0x5f, 0xfd, 0x98, 0x8d, 0xfc, 0xf2, 0x53, 0x96, 0x23, 0xc8, 0xff, - 0xb6, 0x08, 0x4b, 0xbd, 0xba, 0xa0, 0xef, 0x60, 0x41, 0x3c, 0x4c, 0xd0, 0xe6, 0xac, 0x8f, 0x25, - 0xe5, 0xe1, 0x0c, 0x96, 0x82, 0xf8, 0x36, 0xc8, 0x3c, 0xc3, 0x8f, 0x67, 0x7a, 0x60, 0x28, 0xf7, - 0xa7, 0x35, 0x13, 0x61, 0x1b, 0x10, 0xf7, 0xef, 0x6e, 0xa4, 0x4f, 0xf6, 0x30, 0xf4, 0x54, 0x50, - 0xee, 
0x86, 0x37, 0x10, 0xc1, 0x4e, 0x21, 0xee, 0x17, 0x03, 0x3d, 0x98, 0xf1, 0xc2, 0x54, 0xd2, - 0x37, 0x74, 0xa2, 0xc4, 0x1e, 0xf6, 0xcc, 0xb5, 0xff, 0x80, 0x08, 0xe3, 0x7a, 0xec, 0x53, 0xe3, - 0x8d, 0xae, 0xdb, 0x20, 0x33, 0x1d, 0x0e, 0x53, 0x99, 0x31, 0x97, 0x6f, 0x98, 0xca, 0x8c, 0x95, - 0xf9, 0x6f, 0x21, 0xee, 0x2b, 0x5d, 0x98, 0x8c, 0xc6, 0xaa, 0xb9, 0xb2, 0x39, 0xbd, 0xa1, 0x08, - 0xde, 0x05, 0x99, 0xc9, 0x16, 0x0a, 0x01, 0x7e, 0x9c, 0x64, 0x2a, 0x0f, 0xa6, 0xb6, 0xf3, 0x03, - 0xdf, 0x95, 0xd0, 0x39, 0xdc, 0xe2, 0x92, 0x84, 0xb4, 0x10, 0xe8, 0x07, 0xf4, 0x4f, 0xd1, 0x43, - 0xef, 0x17, 0x49, 0x1e, 0xc2, 0x82, 0xd0, 0x2f, 0x14, 0xe2, 0x2c, 0x0f, 0x4b, 0xdd, 0x9b, 0x4e, - 0xcb, 0xf6, 0x57, 0x57, 0xaf, 0xb3, 0x91, 0xdf, 0x5f, 0x67, 0x23, 0xdf, 0x5f, 0x67, 0xa5, 0xab, - 0xeb, 0xac, 0xf4, 0xeb, 0x75, 0x56, 0xfa, 0xeb, 0x3a, 0x2b, 0x3d, 0xdf, 0x99, 0xfd, 0xb7, 0x78, - 0xab, 0x37, 0x79, 0x16, 0x39, 0x8b, 0xf3, 0x88, 0x1f, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x20, - 0x9c, 0x85, 0x7f, 0x67, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SnapshotsClient is the client API for Snapshots service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SnapshotsClient interface { - Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) - View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) - Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) - Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) - Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) - List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) - Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) - Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*types1.Empty, error) -} - -type snapshotsClient struct { - cc *grpc.ClientConn -} - -func NewSnapshotsClient(cc *grpc.ClientConn) SnapshotsClient { - return &snapshotsClient{cc} -} - -func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) { - out := new(PrepareSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) { - out := new(ViewSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) { - out := new(MountsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) { - out := new(StatSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) { - out := new(UpdateSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &_Snapshots_serviceDesc.Streams[0], "/containerd.services.snapshots.v1.Snapshots/List", opts...) - if err != nil { - return nil, err - } - x := &snapshotsListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Snapshots_ListClient interface { - Recv() (*ListSnapshotsResponse, error) - grpc.ClientStream -} - -type snapshotsListClient struct { - grpc.ClientStream -} - -func (x *snapshotsListClient) Recv() (*ListSnapshotsResponse, error) { - m := new(ListSnapshotsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) { - out := new(UsageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Cleanup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SnapshotsServer is the server API for Snapshots service. 
-type SnapshotsServer interface { - Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) - View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) - Mounts(context.Context, *MountsRequest) (*MountsResponse, error) - Commit(context.Context, *CommitSnapshotRequest) (*types1.Empty, error) - Remove(context.Context, *RemoveSnapshotRequest) (*types1.Empty, error) - Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) - Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) - List(*ListSnapshotsRequest, Snapshots_ListServer) error - Usage(context.Context, *UsageRequest) (*UsageResponse, error) - Cleanup(context.Context, *CleanupRequest) (*types1.Empty, error) -} - -// UnimplementedSnapshotsServer can be embedded to have forward compatible implementations. -type UnimplementedSnapshotsServer struct { -} - -func (*UnimplementedSnapshotsServer) Prepare(ctx context.Context, req *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented") -} -func (*UnimplementedSnapshotsServer) View(ctx context.Context, req *ViewSnapshotRequest) (*ViewSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method View not implemented") -} -func (*UnimplementedSnapshotsServer) Mounts(ctx context.Context, req *MountsRequest) (*MountsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Mounts not implemented") -} -func (*UnimplementedSnapshotsServer) Commit(ctx context.Context, req *CommitSnapshotRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") -} -func (*UnimplementedSnapshotsServer) Remove(ctx context.Context, req *RemoveSnapshotRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") -} -func (*UnimplementedSnapshotsServer) Stat(ctx context.Context, req *StatSnapshotRequest) (*StatSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented") -} -func (*UnimplementedSnapshotsServer) Update(ctx context.Context, req *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedSnapshotsServer) List(req *ListSnapshotsRequest, srv Snapshots_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedSnapshotsServer) Usage(ctx context.Context, req *UsageRequest) (*UsageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Usage not implemented") -} -func (*UnimplementedSnapshotsServer) Cleanup(ctx context.Context, req *CleanupRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") -} - -func RegisterSnapshotsServer(s *grpc.Server, srv SnapshotsServer) { - s.RegisterService(&_Snapshots_serviceDesc, srv) -} - -func _Snapshots_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PrepareSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Prepare(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Prepare", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Prepare(ctx, req.(*PrepareSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_View_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ViewSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).View(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/View", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).View(ctx, req.(*ViewSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Mounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MountsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Mounts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Mounts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Mounts(ctx, req.(*MountsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CommitSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Commit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Commit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Commit(ctx, req.(*CommitSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Remove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Remove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Remove(ctx, req.(*RemoveSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Stat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Stat(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Stat", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Stat(ctx, req.(*StatSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(UpdateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Update(ctx, req.(*UpdateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListSnapshotsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(SnapshotsServer).List(m, &snapshotsListServer{stream}) -} - -type Snapshots_ListServer interface { - Send(*ListSnapshotsResponse) error - grpc.ServerStream -} - -type snapshotsListServer struct { - grpc.ServerStream -} - -func (x *snapshotsListServer) Send(m *ListSnapshotsResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Snapshots_Usage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UsageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Usage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Usage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Usage(ctx, req.(*UsageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Snapshots_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CleanupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Cleanup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Cleanup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Cleanup(ctx, req.(*CleanupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Snapshots_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.snapshots.v1.Snapshots", - HandlerType: (*SnapshotsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Prepare", - Handler: _Snapshots_Prepare_Handler, - }, - { - MethodName: "View", - Handler: _Snapshots_View_Handler, - }, - { - MethodName: "Mounts", - Handler: _Snapshots_Mounts_Handler, - }, - { - MethodName: "Commit", - Handler: _Snapshots_Commit_Handler, - }, - { - MethodName: "Remove", - Handler: _Snapshots_Remove_Handler, - }, - { - MethodName: "Stat", - Handler: _Snapshots_Stat_Handler, - }, - { - MethodName: "Update", - Handler: _Snapshots_Update_Handler, - }, - { - MethodName: "Usage", - Handler: _Snapshots_Usage_Handler, - }, - { - MethodName: "Cleanup", - Handler: _Snapshots_Cleanup_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "List", - Handler: _Snapshots_List_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", -} - -func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, 
err - } - return dAtA[:n], nil -} - -func (m *PrepareSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrepareSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PrepareSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrepareSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrepareSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ViewSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ViewSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ViewSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if 
len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ViewSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ViewSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ViewSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MountsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MountsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MountsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MountsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemoveSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CommitSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommitSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = 
encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintSnapshots(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x2a - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintSnapshots(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 - if m.Kind != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x18 - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UpdateSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateSnapshotResponse) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListSnapshotsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListSnapshotsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListSnapshotsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListSnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UsageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UsageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Inodes != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Inodes)) - i-- - dAtA[i] = 0x10 - } - if m.Size_ != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CleanupRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CleanupRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CleanupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshots(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PrepareSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PrepareSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ViewSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ViewSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = 
e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MountsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MountsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RemoveSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CommitSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.Kind != 0 { - n += 1 + sovSnapshots(uint64(m.Kind)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovSnapshots(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovSnapshots(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListSnapshotsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListSnapshotsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UsageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UsageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Size_ != 0 { - n += 1 + sovSnapshots(uint64(m.Size_)) - } - if m.Inodes != 0 { - n += 1 + sovSnapshots(uint64(m.Inodes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CleanupRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnapshots(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshots(x uint64) (n int) { - return sovSnapshots(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PrepareSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&PrepareSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PrepareSnapshotResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := strings.Join([]string{`&PrepareSnapshotResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ViewSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ 
:= range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ViewSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ViewSnapshotResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := strings.Join([]string{`&ViewSnapshotResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MountsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MountsRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MountsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := strings.Join([]string{`&MountsResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CommitSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&CommitSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Info) 
String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Info{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatSnapshotResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatSnapshotResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateSnapshotResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateSnapshotResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListSnapshotsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListSnapshotsRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListSnapshotsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForInfo := "[]Info{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," - } - repeatedStringForInfo += "}" - s := strings.Join([]string{`&ListSnapshotsResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UsageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UsageRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UsageResponse) String() string { - if this == nil { - return 
"nil" - } - s := strings.Join([]string{`&UsageResponse{`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `Inodes:` + fmt.Sprintf("%v", this.Inodes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CleanupRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CleanupRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringSnapshots(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PrepareSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrepareSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrepareSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrepareSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrepareSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrepareSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ViewSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ViewSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ViewSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ViewSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ViewSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ViewSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MountsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MountsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MountsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MountsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommitSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types1.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListSnapshotsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListSnapshotsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListSnapshotsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListSnapshotsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListSnapshotsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListSnapshotsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Inodes", wireType) - } - m.Inodes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Inodes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CleanupRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CleanupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CleanupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnapshots(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshots - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshots - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshots - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnapshots - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnapshots - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnapshots - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{17} +} + +func (x *CleanupRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +var File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfd, 0x01, 0x0a, 0x16, 
0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x17, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x59, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 
0x14, + 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, 0x0e, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x4b, 0x0a, + 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0xf7, 0x01, 0x0a, 0x15, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 
0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, + 0xeb, 0x02, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x52, 0x0a, + 0x14, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, + 0x6f, 0x22, 0xb2, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 
0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x52, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x53, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x42, 0x0a, 0x0c, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x55, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x0e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x2a, 0x38, 0x0a, 0x04, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x56, 0x49, 0x45, 0x57, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, + 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x32, 0xd3, 0x08, 0x0a, 0x09, 0x53, 0x6e, 
0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x7e, 0x0a, 0x07, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x38, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, + 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x06, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x59, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x75, 0x0a, 0x04, + 0x53, 0x74, 0x61, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 
0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x79, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x05, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 
0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthSnapshots = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnapshots = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnapshots = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData = file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes = []interface{}{ + (Kind)(0), // 0: containerd.services.snapshots.v1.Kind + (*PrepareSnapshotRequest)(nil), // 1: containerd.services.snapshots.v1.PrepareSnapshotRequest + (*PrepareSnapshotResponse)(nil), // 2: containerd.services.snapshots.v1.PrepareSnapshotResponse + (*ViewSnapshotRequest)(nil), // 3: containerd.services.snapshots.v1.ViewSnapshotRequest + (*ViewSnapshotResponse)(nil), // 4: containerd.services.snapshots.v1.ViewSnapshotResponse + (*MountsRequest)(nil), // 5: containerd.services.snapshots.v1.MountsRequest + (*MountsResponse)(nil), // 6: containerd.services.snapshots.v1.MountsResponse + (*RemoveSnapshotRequest)(nil), // 7: containerd.services.snapshots.v1.RemoveSnapshotRequest + (*CommitSnapshotRequest)(nil), // 8: containerd.services.snapshots.v1.CommitSnapshotRequest + (*StatSnapshotRequest)(nil), // 9: containerd.services.snapshots.v1.StatSnapshotRequest + (*Info)(nil), // 10: containerd.services.snapshots.v1.Info + (*StatSnapshotResponse)(nil), // 11: containerd.services.snapshots.v1.StatSnapshotResponse + (*UpdateSnapshotRequest)(nil), // 12: containerd.services.snapshots.v1.UpdateSnapshotRequest + (*UpdateSnapshotResponse)(nil), // 13: containerd.services.snapshots.v1.UpdateSnapshotResponse + (*ListSnapshotsRequest)(nil), // 14: containerd.services.snapshots.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 15: containerd.services.snapshots.v1.ListSnapshotsResponse + (*UsageRequest)(nil), // 16: containerd.services.snapshots.v1.UsageRequest + (*UsageResponse)(nil), // 17: containerd.services.snapshots.v1.UsageResponse + (*CleanupRequest)(nil), // 18: containerd.services.snapshots.v1.CleanupRequest + nil, // 19: containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry + nil, // 20: containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry + nil, // 21: containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry + nil, // 22: containerd.services.snapshots.v1.Info.LabelsEntry + 
(*types.Mount)(nil), // 23: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 25: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 26: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs = []int32{ + 19, // 0: containerd.services.snapshots.v1.PrepareSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry + 23, // 1: containerd.services.snapshots.v1.PrepareSnapshotResponse.mounts:type_name -> containerd.types.Mount + 20, // 2: containerd.services.snapshots.v1.ViewSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry + 23, // 3: containerd.services.snapshots.v1.ViewSnapshotResponse.mounts:type_name -> containerd.types.Mount + 23, // 4: containerd.services.snapshots.v1.MountsResponse.mounts:type_name -> containerd.types.Mount + 21, // 5: containerd.services.snapshots.v1.CommitSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry + 0, // 6: containerd.services.snapshots.v1.Info.kind:type_name -> containerd.services.snapshots.v1.Kind + 24, // 7: containerd.services.snapshots.v1.Info.created_at:type_name -> google.protobuf.Timestamp + 24, // 8: containerd.services.snapshots.v1.Info.updated_at:type_name -> google.protobuf.Timestamp + 22, // 9: containerd.services.snapshots.v1.Info.labels:type_name -> containerd.services.snapshots.v1.Info.LabelsEntry + 10, // 10: containerd.services.snapshots.v1.StatSnapshotResponse.info:type_name -> containerd.services.snapshots.v1.Info + 10, // 11: containerd.services.snapshots.v1.UpdateSnapshotRequest.info:type_name -> containerd.services.snapshots.v1.Info + 25, // 12: containerd.services.snapshots.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 10, // 13: containerd.services.snapshots.v1.UpdateSnapshotResponse.info:type_name -> containerd.services.snapshots.v1.Info + 10, // 14: containerd.services.snapshots.v1.ListSnapshotsResponse.info:type_name -> containerd.services.snapshots.v1.Info + 1, // 15: containerd.services.snapshots.v1.Snapshots.Prepare:input_type -> containerd.services.snapshots.v1.PrepareSnapshotRequest + 3, // 16: containerd.services.snapshots.v1.Snapshots.View:input_type -> containerd.services.snapshots.v1.ViewSnapshotRequest + 5, // 17: containerd.services.snapshots.v1.Snapshots.Mounts:input_type -> containerd.services.snapshots.v1.MountsRequest + 8, // 18: containerd.services.snapshots.v1.Snapshots.Commit:input_type -> containerd.services.snapshots.v1.CommitSnapshotRequest + 7, // 19: containerd.services.snapshots.v1.Snapshots.Remove:input_type -> containerd.services.snapshots.v1.RemoveSnapshotRequest + 9, // 20: containerd.services.snapshots.v1.Snapshots.Stat:input_type -> containerd.services.snapshots.v1.StatSnapshotRequest + 12, // 21: containerd.services.snapshots.v1.Snapshots.Update:input_type -> containerd.services.snapshots.v1.UpdateSnapshotRequest + 14, // 22: containerd.services.snapshots.v1.Snapshots.List:input_type -> containerd.services.snapshots.v1.ListSnapshotsRequest + 16, // 23: containerd.services.snapshots.v1.Snapshots.Usage:input_type -> containerd.services.snapshots.v1.UsageRequest + 18, // 24: containerd.services.snapshots.v1.Snapshots.Cleanup:input_type -> containerd.services.snapshots.v1.CleanupRequest + 2, // 25: containerd.services.snapshots.v1.Snapshots.Prepare:output_type -> 
containerd.services.snapshots.v1.PrepareSnapshotResponse + 4, // 26: containerd.services.snapshots.v1.Snapshots.View:output_type -> containerd.services.snapshots.v1.ViewSnapshotResponse + 6, // 27: containerd.services.snapshots.v1.Snapshots.Mounts:output_type -> containerd.services.snapshots.v1.MountsResponse + 26, // 28: containerd.services.snapshots.v1.Snapshots.Commit:output_type -> google.protobuf.Empty + 26, // 29: containerd.services.snapshots.v1.Snapshots.Remove:output_type -> google.protobuf.Empty + 11, // 30: containerd.services.snapshots.v1.Snapshots.Stat:output_type -> containerd.services.snapshots.v1.StatSnapshotResponse + 13, // 31: containerd.services.snapshots.v1.Snapshots.Update:output_type -> containerd.services.snapshots.v1.UpdateSnapshotResponse + 15, // 32: containerd.services.snapshots.v1.Snapshots.List:output_type -> containerd.services.snapshots.v1.ListSnapshotsResponse + 17, // 33: containerd.services.snapshots.v1.Snapshots.Usage:output_type -> containerd.services.snapshots.v1.UsageResponse + 26, // 34: containerd.services.snapshots.v1.Snapshots.Cleanup:output_type -> google.protobuf.Empty + 25, // [25:35] is the sub-list for method output_type + 15, // [15:25] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_init() } +func file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_init() { + if File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountsResponse); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommitSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc, + NumEnums: 1, + NumMessages: 22, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto = out.File + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto index 9bbef1429e..170ff473e2 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.snapshots.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -101,13 +100,10 @@ message StatSnapshotRequest { } enum Kind { - option (gogoproto.goproto_enum_prefix) = false; - option (gogoproto.enum_customname) = "Kind"; - - UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"]; - VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"]; - ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"]; - COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"]; + UNKNOWN = 0; + VIEW = 1; + ACTIVE = 2; + COMMITTED = 3; } message Info { @@ -116,10 +112,10 @@ message Info { Kind kind = 3; // CreatedAt provides the time at which the snapshot was created. - google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 4; // UpdatedAt provides the time the info was last updated. - google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 5; // Labels are arbitrary data on snapshots. // @@ -128,12 +124,12 @@ message Info { } message StatSnapshotResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message UpdateSnapshotRequest { string snapshotter = 1; - Info info = 2 [(gogoproto.nullable) = false]; + Info info = 2; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. 
@@ -145,7 +141,7 @@ message UpdateSnapshotRequest { } message UpdateSnapshotResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message ListSnapshotsRequest{ @@ -165,7 +161,7 @@ message ListSnapshotsRequest{ } message ListSnapshotsResponse { - repeated Info info = 1 [(gogoproto.nullable) = false]; + repeated Info info = 1; } message UsageRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go new file mode 100644 index 0000000000..765c027446 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go @@ -0,0 +1,458 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto + +package snapshots + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// SnapshotsClient is the client API for Snapshots service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SnapshotsClient interface { + Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) + View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) + Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) + Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) + Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) + List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) + Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) + Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type snapshotsClient struct { + cc grpc.ClientConnInterface +} + +func NewSnapshotsClient(cc grpc.ClientConnInterface) SnapshotsClient { + return &snapshotsClient{cc} +} + +func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) { + out := new(PrepareSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) { + out := new(ViewSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) { + out := new(MountsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) { + out := new(StatSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) { + out := new(UpdateSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &Snapshots_ServiceDesc.Streams[0], "/containerd.services.snapshots.v1.Snapshots/List", opts...) + if err != nil { + return nil, err + } + x := &snapshotsListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Snapshots_ListClient interface { + Recv() (*ListSnapshotsResponse, error) + grpc.ClientStream +} + +type snapshotsListClient struct { + grpc.ClientStream +} + +func (x *snapshotsListClient) Recv() (*ListSnapshotsResponse, error) { + m := new(ListSnapshotsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) { + out := new(UsageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Cleanup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnapshotsServer is the server API for Snapshots service. 
+// All implementations must embed UnimplementedSnapshotsServer +// for forward compatibility +type SnapshotsServer interface { + Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) + View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) + Mounts(context.Context, *MountsRequest) (*MountsResponse, error) + Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error) + Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error) + Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) + Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) + List(*ListSnapshotsRequest, Snapshots_ListServer) error + Usage(context.Context, *UsageRequest) (*UsageResponse, error) + Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedSnapshotsServer() +} + +// UnimplementedSnapshotsServer must be embedded to have forward compatible implementations. +type UnimplementedSnapshotsServer struct { +} + +func (UnimplementedSnapshotsServer) Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented") +} +func (UnimplementedSnapshotsServer) View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method View not implemented") +} +func (UnimplementedSnapshotsServer) Mounts(context.Context, *MountsRequest) (*MountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mounts not implemented") +} +func (UnimplementedSnapshotsServer) Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (UnimplementedSnapshotsServer) Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") +} +func (UnimplementedSnapshotsServer) Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented") +} +func (UnimplementedSnapshotsServer) Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedSnapshotsServer) List(*ListSnapshotsRequest, Snapshots_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedSnapshotsServer) Usage(context.Context, *UsageRequest) (*UsageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Usage not implemented") +} +func (UnimplementedSnapshotsServer) Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") +} +func (UnimplementedSnapshotsServer) mustEmbedUnimplementedSnapshotsServer() {} + +// UnsafeSnapshotsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SnapshotsServer will +// result in compilation errors. 
+type UnsafeSnapshotsServer interface { + mustEmbedUnimplementedSnapshotsServer() +} + +func RegisterSnapshotsServer(s grpc.ServiceRegistrar, srv SnapshotsServer) { + s.RegisterService(&Snapshots_ServiceDesc, srv) +} + +func _Snapshots_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Prepare(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Prepare", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Prepare(ctx, req.(*PrepareSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_View_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ViewSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).View(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/View", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).View(ctx, req.(*ViewSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Mounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Mounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Mounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Mounts(ctx, req.(*MountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Commit(ctx, req.(*CommitSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Remove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Remove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Remove(ctx, req.(*RemoveSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Stat_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Stat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Stat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Stat(ctx, req.(*StatSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Update(ctx, req.(*UpdateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListSnapshotsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SnapshotsServer).List(m, &snapshotsListServer{stream}) +} + +type Snapshots_ListServer interface { + Send(*ListSnapshotsResponse) error + grpc.ServerStream +} + +type snapshotsListServer struct { + grpc.ServerStream +} + +func (x *snapshotsListServer) Send(m *ListSnapshotsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Snapshots_Usage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UsageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Usage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Usage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Usage(ctx, req.(*UsageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CleanupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Cleanup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Cleanup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Cleanup(ctx, req.(*CleanupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Snapshots_ServiceDesc is the grpc.ServiceDesc for Snapshots service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Snapshots_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.snapshots.v1.Snapshots", + HandlerType: (*SnapshotsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Prepare", + Handler: _Snapshots_Prepare_Handler, + }, + { + MethodName: "View", + Handler: _Snapshots_View_Handler, + }, + { + MethodName: "Mounts", + Handler: _Snapshots_Mounts_Handler, + }, + { + MethodName: "Commit", + Handler: _Snapshots_Commit_Handler, + }, + { + MethodName: "Remove", + Handler: _Snapshots_Remove_Handler, + }, + { + MethodName: "Stat", + Handler: _Snapshots_Stat_Handler, + }, + { + MethodName: "Update", + Handler: _Snapshots_Update_Handler, + }, + { + MethodName: "Usage", + Handler: _Snapshots_Usage_Handler, + }, + { + MethodName: "Cleanup", + Handler: _Snapshots_Cleanup_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Snapshots_List_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go new file mode 100644 index 0000000000..04c4362d83 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package streaming diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go new file mode 100644 index 0000000000..08fb6392ec --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go @@ -0,0 +1,175 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto + +package streaming + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StreamInit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *StreamInit) Reset() { + *x = StreamInit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInit) ProtoMessage() {} + +func (x *StreamInit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInit.ProtoReflect.Descriptor instead. +func (*StreamInit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamInit) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +var File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1c, 0x0a, 0x0a, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0x45, 0x0a, 0x09, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x38, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = []interface{}{ + (*StreamInit)(nil), // 0: containerd.services.streaming.v1.StreamInit + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = []int32{ + 1, // 0: containerd.services.streaming.v1.Streaming.Stream:input_type -> google.protobuf.Any + 1, // 1: containerd.services.streaming.v1.Streaming.Stream:output_type -> google.protobuf.Any + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() } +func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() { + if File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs, + MessageInfos: 
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto = out.File + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto new file mode 100644 index 0000000000..4c14f2ecfa --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto @@ -0,0 +1,31 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.services.streaming.v1; + +import "google/protobuf/any.proto"; + +option go_package = "github.com/containerd/containerd/api/services/streaming/v1;streaming"; + +service Streaming { + rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any); +} + +message StreamInit { + string id = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go new file mode 100644 index 0000000000..a0a0bc59c8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto + +package streaming + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// StreamingClient is the client API for Streaming service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type StreamingClient interface { + Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error) +} + +type streamingClient struct { + cc grpc.ClientConnInterface +} + +func NewStreamingClient(cc grpc.ClientConnInterface) StreamingClient { + return &streamingClient{cc} +} + +func (c *streamingClient) Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Streaming_ServiceDesc.Streams[0], "/containerd.services.streaming.v1.Streaming/Stream", opts...) + if err != nil { + return nil, err + } + x := &streamingStreamClient{stream} + return x, nil +} + +type Streaming_StreamClient interface { + Send(*anypb.Any) error + Recv() (*anypb.Any, error) + grpc.ClientStream +} + +type streamingStreamClient struct { + grpc.ClientStream +} + +func (x *streamingStreamClient) Send(m *anypb.Any) error { + return x.ClientStream.SendMsg(m) +} + +func (x *streamingStreamClient) Recv() (*anypb.Any, error) { + m := new(anypb.Any) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StreamingServer is the server API for Streaming service. +// All implementations must embed UnimplementedStreamingServer +// for forward compatibility +type StreamingServer interface { + Stream(Streaming_StreamServer) error + mustEmbedUnimplementedStreamingServer() +} + +// UnimplementedStreamingServer must be embedded to have forward compatible implementations. +type UnimplementedStreamingServer struct { +} + +func (UnimplementedStreamingServer) Stream(Streaming_StreamServer) error { + return status.Errorf(codes.Unimplemented, "method Stream not implemented") +} +func (UnimplementedStreamingServer) mustEmbedUnimplementedStreamingServer() {} + +// UnsafeStreamingServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StreamingServer will +// result in compilation errors. +type UnsafeStreamingServer interface { + mustEmbedUnimplementedStreamingServer() +} + +func RegisterStreamingServer(s grpc.ServiceRegistrar, srv StreamingServer) { + s.RegisterService(&Streaming_ServiceDesc, srv) +} + +func _Streaming_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StreamingServer).Stream(&streamingStreamServer{stream}) +} + +type Streaming_StreamServer interface { + Send(*anypb.Any) error + Recv() (*anypb.Any, error) + grpc.ServerStream +} + +type streamingStreamServer struct { + grpc.ServerStream +} + +func (x *streamingStreamServer) Send(m *anypb.Any) error { + return x.ServerStream.SendMsg(m) +} + +func (x *streamingStreamServer) Recv() (*anypb.Any, error) { + m := new(anypb.Any) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Streaming_ServiceDesc is the grpc.ServiceDesc for Streaming service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Streaming_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.streaming.v1.Streaming", + HandlerType: (*StreamingServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Stream", + Handler: _Streaming_Stream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/streaming/v1/streaming.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go index dcc7680893..1a55d696dd 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go @@ -1,42 +1,50 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto package tasks import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" task "github.com/containerd/containerd/api/types/task" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type CreateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` // RootFS provides the pre-chroot mounts to perform in the shim before // executing the container task. @@ -44,491 +52,713 @@ type CreateTaskRequest struct { // These are for mounts that cannot be performed in the user namespace. // Typically, these mounts should be resolved from snapshots specified on // the container object. - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"` - Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - Options *types1.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"` - RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"` + Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"` + RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"` } -func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } -func (*CreateTaskRequest) ProtoMessage() {} -func (*CreateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{0} -} -func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateTaskRequest) Reset() { + *x = CreateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskRequest.Merge(m, src) -} -func (m *CreateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m) + +func (x *CreateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var 
xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo +func (*CreateTaskRequest) ProtoMessage() {} + +func (x *CreateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskRequest.ProtoReflect.Descriptor instead. +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *CreateTaskRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateTaskRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *CreateTaskRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *CreateTaskRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *CreateTaskRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CreateTaskRequest) GetCheckpoint() *types.Descriptor { + if x != nil { + return x.Checkpoint + } + return nil +} + +func (x *CreateTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +func (x *CreateTaskRequest) GetRuntimePath() string { + if x != nil { + return x.RuntimePath + } + return "" +} type CreateTaskResponse struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } -func (*CreateTaskResponse) ProtoMessage() {} -func (*CreateTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{1} -} -func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateTaskResponse) Reset() { + *x = CreateTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskResponse.Merge(m, src) -} -func (m *CreateTaskResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m) + +func (x *CreateTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo +func (*CreateTaskResponse) ProtoMessage() {} + +func (x *CreateTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskResponse.ProtoReflect.Descriptor instead. +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateTaskResponse) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *CreateTaskResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type StartRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *StartRequest) Reset() { *m = StartRequest{} } -func (*StartRequest) ProtoMessage() {} -func (*StartRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{2} -} -func (m *StartRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StartRequest) Reset() { + *x = StartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StartRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartRequest.Merge(m, src) -} -func (m *StartRequest) XXX_Size() int { - return m.Size() -} -func (m *StartRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartRequest.DiscardUnknown(m) + +func (x *StartRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StartRequest proto.InternalMessageInfo +func (*StartRequest) ProtoMessage() {} + +func (x *StartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. 
+func (*StartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +func (x *StartRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *StartRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type StartResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *StartResponse) Reset() { *m = StartResponse{} } -func (*StartResponse) ProtoMessage() {} -func (*StartResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{3} -} -func (m *StartResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StartResponse) Reset() { + *x = StartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StartResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartResponse.Merge(m, src) -} -func (m *StartResponse) XXX_Size() int { - return m.Size() -} -func (m *StartResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartResponse.DiscardUnknown(m) + +func (x *StartResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StartResponse proto.InternalMessageInfo +func (*StartResponse) ProtoMessage() {} + +func (x *StartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. 
+func (*StartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *StartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type DeleteTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *DeleteTaskRequest) Reset() { *m = DeleteTaskRequest{} } -func (*DeleteTaskRequest) ProtoMessage() {} -func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{4} -} -func (m *DeleteTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteTaskRequest) Reset() { + *x = DeleteTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteTaskRequest.Merge(m, src) -} -func (m *DeleteTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteTaskRequest.DiscardUnknown(m) + +func (x *DeleteTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteTaskRequest proto.InternalMessageInfo +func (*DeleteTaskRequest) ProtoMessage() {} + +func (x *DeleteTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTaskRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *DeleteTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type DeleteResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{5} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(m, src) -} -func (m *DeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteResponse) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *DeleteResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *DeleteResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *DeleteResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} type DeleteProcessRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *DeleteProcessRequest) Reset() { *m = DeleteProcessRequest{} } -func (*DeleteProcessRequest) ProtoMessage() {} -func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{6} -} -func (m *DeleteProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteProcessRequest) Reset() { + *x = DeleteProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteProcessRequest.Merge(m, src) -} -func (m *DeleteProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteProcessRequest.DiscardUnknown(m) + +func (x *DeleteProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_DeleteProcessRequest proto.InternalMessageInfo +func (*DeleteProcessRequest) ProtoMessage() {} + +func (x *DeleteProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteProcessRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{6} +} + +func (x *DeleteProcessRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *DeleteProcessRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type GetRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{7} -} -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return m.Size() -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetRequest proto.InternalMessageInfo +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{7} +} + +func (x *GetRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *GetRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type GetResponse struct { - Process *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` } -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{8} -} -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return m.Size() -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetResponse proto.InternalMessageInfo +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. 
+func (*GetResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{8} +} + +func (x *GetResponse) GetProcess() *task.Process { + if x != nil { + return x.Process + } + return nil +} type ListTasksRequest struct { - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` } -func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } -func (*ListTasksRequest) ProtoMessage() {} -func (*ListTasksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{9} -} -func (m *ListTasksRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListTasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListTasksRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListTasksRequest) Reset() { + *x = ListTasksRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListTasksRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTasksRequest.Merge(m, src) -} -func (m *ListTasksRequest) XXX_Size() int { - return m.Size() -} -func (m *ListTasksRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListTasksRequest.DiscardUnknown(m) + +func (x *ListTasksRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListTasksRequest proto.InternalMessageInfo +func (*ListTasksRequest) ProtoMessage() {} + +func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTasksRequest.ProtoReflect.Descriptor instead. 
+func (*ListTasksRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{9} +} + +func (x *ListTasksRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} type ListTasksResponse struct { - Tasks []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tasks []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` } -func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } -func (*ListTasksResponse) ProtoMessage() {} -func (*ListTasksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{10} -} -func (m *ListTasksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListTasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListTasksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListTasksResponse) Reset() { + *x = ListTasksResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListTasksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTasksResponse.Merge(m, src) -} -func (m *ListTasksResponse) XXX_Size() int { - return m.Size() -} -func (m *ListTasksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListTasksResponse.DiscardUnknown(m) + +func (x *ListTasksResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListTasksResponse proto.InternalMessageInfo +func (*ListTasksResponse) ProtoMessage() {} + +func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTasksResponse.ProtoReflect.Descriptor instead. 
+func (*ListTasksResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{10} +} + +func (x *ListTasksResponse) GetTasks() []*task.Process { + if x != nil { + return x.Tasks + } + return nil +} type KillRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` } -func (m *KillRequest) Reset() { *m = KillRequest{} } -func (*KillRequest) ProtoMessage() {} -func (*KillRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{11} -} -func (m *KillRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *KillRequest) Reset() { + *x = KillRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *KillRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_KillRequest.Merge(m, src) -} -func (m *KillRequest) XXX_Size() int { - return m.Size() -} -func (m *KillRequest) XXX_DiscardUnknown() { - xxx_messageInfo_KillRequest.DiscardUnknown(m) + +func (x *KillRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_KillRequest proto.InternalMessageInfo +func (*KillRequest) ProtoMessage() {} + +func (x *KillRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillRequest.ProtoReflect.Descriptor instead. 
+func (*KillRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{11} +} + +func (x *KillRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *KillRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *KillRequest) GetSignal() uint32 { + if x != nil { + return x.Signal + } + return 0 +} + +func (x *KillRequest) GetAll() bool { + if x != nil { + return x.All + } + return false +} type ExecProcessRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` Stdin string `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` Stdout string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"` @@ -537,6975 +767,1593 @@ type ExecProcessRequest struct { // Spec for starting a process in the target container. // // For runc, this is a process spec, for example. - Spec *types1.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"` + Spec *anypb.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"` // id of the exec process - ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{12} -} -func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ExecProcessRequest) Reset() { + *x = ExecProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessRequest.Merge(m, src) -} -func (m *ExecProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) + +func (x *ExecProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo +func (*ExecProcessRequest) ProtoMessage() {} + +func (x *ExecProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessRequest.ProtoReflect.Descriptor instead. 
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{12} +} + +func (x *ExecProcessRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *ExecProcessRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *ExecProcessRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *ExecProcessRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *ExecProcessRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *ExecProcessRequest) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +func (x *ExecProcessRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type ExecProcessResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } -func (*ExecProcessResponse) ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{13} -} -func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ExecProcessResponse) Reset() { + *x = ExecProcessResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessResponse.Merge(m, src) -} -func (m *ExecProcessResponse) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) + +func (x *ExecProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo +func (*ExecProcessResponse) ProtoMessage() {} + +func (x *ExecProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessResponse.ProtoReflect.Descriptor instead. 
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{13} +} type ResizePtyRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` } -func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } -func (*ResizePtyRequest) ProtoMessage() {} -func (*ResizePtyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{14} -} -func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ResizePtyRequest) Reset() { + *x = ResizePtyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ResizePtyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResizePtyRequest.Merge(m, src) -} -func (m *ResizePtyRequest) XXX_Size() int { - return m.Size() -} -func (m *ResizePtyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m) + +func (x *ResizePtyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo +func (*ResizePtyRequest) ProtoMessage() {} + +func (x *ResizePtyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResizePtyRequest.ProtoReflect.Descriptor instead. 
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{14} +} + +func (x *ResizePtyRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *ResizePtyRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *ResizePtyRequest) GetWidth() uint32 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *ResizePtyRequest) GetHeight() uint32 { + if x != nil { + return x.Height + } + return 0 +} type CloseIORequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` } -func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } -func (*CloseIORequest) ProtoMessage() {} -func (*CloseIORequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{15} -} -func (m *CloseIORequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CloseIORequest) Reset() { + *x = CloseIORequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CloseIORequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseIORequest.Merge(m, src) -} -func (m *CloseIORequest) XXX_Size() int { - return m.Size() -} -func (m *CloseIORequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseIORequest.DiscardUnknown(m) + +func (x *CloseIORequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo +func (*CloseIORequest) ProtoMessage() {} + +func (x *CloseIORequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseIORequest.ProtoReflect.Descriptor instead. 
+func (*CloseIORequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{15} +} + +func (x *CloseIORequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *CloseIORequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *CloseIORequest) GetStdin() bool { + if x != nil { + return x.Stdin + } + return false +} type PauseTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *PauseTaskRequest) Reset() { *m = PauseTaskRequest{} } -func (*PauseTaskRequest) ProtoMessage() {} -func (*PauseTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{16} -} -func (m *PauseTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PauseTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PauseTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PauseTaskRequest) Reset() { + *x = PauseTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PauseTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PauseTaskRequest.Merge(m, src) -} -func (m *PauseTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *PauseTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PauseTaskRequest.DiscardUnknown(m) + +func (x *PauseTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PauseTaskRequest proto.InternalMessageInfo +func (*PauseTaskRequest) ProtoMessage() {} + +func (x *PauseTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseTaskRequest.ProtoReflect.Descriptor instead. 
+func (*PauseTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{16} +} + +func (x *PauseTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type ResumeTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *ResumeTaskRequest) Reset() { *m = ResumeTaskRequest{} } -func (*ResumeTaskRequest) ProtoMessage() {} -func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{17} -} -func (m *ResumeTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResumeTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResumeTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ResumeTaskRequest) Reset() { + *x = ResumeTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ResumeTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResumeTaskRequest.Merge(m, src) -} -func (m *ResumeTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *ResumeTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResumeTaskRequest.DiscardUnknown(m) + +func (x *ResumeTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ResumeTaskRequest proto.InternalMessageInfo +func (*ResumeTaskRequest) ProtoMessage() {} + +func (x *ResumeTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeTaskRequest.ProtoReflect.Descriptor instead. 
+func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{17} +} + +func (x *ResumeTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type ListPidsRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *ListPidsRequest) Reset() { *m = ListPidsRequest{} } -func (*ListPidsRequest) ProtoMessage() {} -func (*ListPidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{18} -} -func (m *ListPidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListPidsRequest) Reset() { + *x = ListPidsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListPidsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPidsRequest.Merge(m, src) -} -func (m *ListPidsRequest) XXX_Size() int { - return m.Size() -} -func (m *ListPidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPidsRequest.DiscardUnknown(m) + +func (x *ListPidsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListPidsRequest proto.InternalMessageInfo +func (*ListPidsRequest) ProtoMessage() {} + +func (x *ListPidsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPidsRequest.ProtoReflect.Descriptor instead. 
+func (*ListPidsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{18} +} + +func (x *ListPidsRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} type ListPidsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Processes includes the process ID and additional process information - Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` } -func (m *ListPidsResponse) Reset() { *m = ListPidsResponse{} } -func (*ListPidsResponse) ProtoMessage() {} -func (*ListPidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{19} -} -func (m *ListPidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListPidsResponse) Reset() { + *x = ListPidsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListPidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPidsResponse.Merge(m, src) -} -func (m *ListPidsResponse) XXX_Size() int { - return m.Size() -} -func (m *ListPidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListPidsResponse.DiscardUnknown(m) + +func (x *ListPidsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListPidsResponse proto.InternalMessageInfo +func (*ListPidsResponse) ProtoMessage() {} + +func (x *ListPidsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPidsResponse.ProtoReflect.Descriptor instead. 
+func (*ListPidsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{19} +} + +func (x *ListPidsResponse) GetProcesses() []*task.ProcessInfo { + if x != nil { + return x.Processes + } + return nil +} type CheckpointTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ParentCheckpoint github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"` - Options *types1.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ParentCheckpoint string `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` } -func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } -func (*CheckpointTaskRequest) ProtoMessage() {} -func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{20} -} -func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CheckpointTaskRequest) Reset() { + *x = CheckpointTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskRequest.Merge(m, src) -} -func (m *CheckpointTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CheckpointTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m) + +func (x *CheckpointTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo +func (*CheckpointTaskRequest) ProtoMessage() {} + +func (x *CheckpointTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{20} +} + +func (x *CheckpointTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *CheckpointTaskRequest) GetParentCheckpoint() string { + if x != nil { + return x.ParentCheckpoint + } + return "" +} + +func (x *CheckpointTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} type CheckpointTaskResponse struct { - Descriptors []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Descriptors []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` } -func (m *CheckpointTaskResponse) Reset() { *m = CheckpointTaskResponse{} } -func (*CheckpointTaskResponse) ProtoMessage() {} -func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{21} -} -func (m *CheckpointTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CheckpointTaskResponse) Reset() { + *x = CheckpointTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CheckpointTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskResponse.Merge(m, src) -} -func (m *CheckpointTaskResponse) XXX_Size() int { - return m.Size() -} -func (m *CheckpointTaskResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskResponse.DiscardUnknown(m) + +func (x *CheckpointTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CheckpointTaskResponse proto.InternalMessageInfo +func (*CheckpointTaskResponse) ProtoMessage() {} + +func (x *CheckpointTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointTaskResponse.ProtoReflect.Descriptor instead. 
+func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{21} +} + +func (x *CheckpointTaskResponse) GetDescriptors() []*types.Descriptor { + if x != nil { + return x.Descriptors + } + return nil +} type UpdateTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Resources *types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } -func (*UpdateTaskRequest) ProtoMessage() {} -func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{22} -} -func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateTaskRequest) Reset() { + *x = UpdateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateTaskRequest.Merge(m, src) -} -func (m *UpdateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) + +func (x *UpdateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo +func (*UpdateTaskRequest) ProtoMessage() {} + +func (x *UpdateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTaskRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{22} +} + +func (x *UpdateTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *UpdateTaskRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *UpdateTaskRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} type MetricsRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } -func (*MetricsRequest) ProtoMessage() {} -func (*MetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{23} -} -func (m *MetricsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *MetricsRequest) Reset() { + *x = MetricsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *MetricsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsRequest.Merge(m, src) -} -func (m *MetricsRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsRequest.DiscardUnknown(m) + +func (x *MetricsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_MetricsRequest proto.InternalMessageInfo +func (*MetricsRequest) ProtoMessage() {} + +func (x *MetricsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsRequest.ProtoReflect.Descriptor instead. 
+func (*MetricsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{23} +} + +func (x *MetricsRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} type MetricsResponse struct { - Metrics []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` } -func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } -func (*MetricsResponse) ProtoMessage() {} -func (*MetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{24} -} -func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *MetricsResponse) Reset() { + *x = MetricsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *MetricsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsResponse.Merge(m, src) -} -func (m *MetricsResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsResponse.DiscardUnknown(m) + +func (x *MetricsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo +func (*MetricsResponse) ProtoMessage() {} + +func (x *MetricsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
+func (*MetricsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{24} +} + +func (x *MetricsResponse) GetMetrics() []*types.Metric { + if x != nil { + return x.Metrics + } + return nil +} type WaitRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *WaitRequest) Reset() { *m = WaitRequest{} } -func (*WaitRequest) ProtoMessage() {} -func (*WaitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{25} -} -func (m *WaitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *WaitRequest) Reset() { + *x = WaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *WaitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitRequest.Merge(m, src) -} -func (m *WaitRequest) XXX_Size() int { - return m.Size() -} -func (m *WaitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WaitRequest.DiscardUnknown(m) + +func (x *WaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_WaitRequest proto.InternalMessageInfo +func (*WaitRequest) ProtoMessage() {} + +func (x *WaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. 
+func (*WaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{25} +} + +func (x *WaitRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *WaitRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type WaitResponse struct { - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *WaitResponse) Reset() { + *x = WaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *WaitResponse) Reset() { *m = WaitResponse{} } func (*WaitResponse) ProtoMessage() {} + +func (x *WaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitResponse.ProtoReflect.Descriptor instead. 
func (*WaitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{26} -} -func (m *WaitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WaitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitResponse.Merge(m, src) -} -func (m *WaitResponse) XXX_Size() int { - return m.Size() -} -func (m *WaitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WaitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest") - proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse") - proto.RegisterType((*StartRequest)(nil), "containerd.services.tasks.v1.StartRequest") - proto.RegisterType((*StartResponse)(nil), "containerd.services.tasks.v1.StartResponse") - proto.RegisterType((*DeleteTaskRequest)(nil), "containerd.services.tasks.v1.DeleteTaskRequest") - proto.RegisterType((*DeleteResponse)(nil), "containerd.services.tasks.v1.DeleteResponse") - proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.services.tasks.v1.DeleteProcessRequest") - proto.RegisterType((*GetRequest)(nil), "containerd.services.tasks.v1.GetRequest") - proto.RegisterType((*GetResponse)(nil), "containerd.services.tasks.v1.GetResponse") - proto.RegisterType((*ListTasksRequest)(nil), "containerd.services.tasks.v1.ListTasksRequest") - proto.RegisterType((*ListTasksResponse)(nil), "containerd.services.tasks.v1.ListTasksResponse") - proto.RegisterType((*KillRequest)(nil), "containerd.services.tasks.v1.KillRequest") - proto.RegisterType((*ExecProcessRequest)(nil), "containerd.services.tasks.v1.ExecProcessRequest") - proto.RegisterType((*ExecProcessResponse)(nil), "containerd.services.tasks.v1.ExecProcessResponse") - proto.RegisterType((*ResizePtyRequest)(nil), "containerd.services.tasks.v1.ResizePtyRequest") - proto.RegisterType((*CloseIORequest)(nil), "containerd.services.tasks.v1.CloseIORequest") - proto.RegisterType((*PauseTaskRequest)(nil), "containerd.services.tasks.v1.PauseTaskRequest") - proto.RegisterType((*ResumeTaskRequest)(nil), "containerd.services.tasks.v1.ResumeTaskRequest") - proto.RegisterType((*ListPidsRequest)(nil), "containerd.services.tasks.v1.ListPidsRequest") - proto.RegisterType((*ListPidsResponse)(nil), "containerd.services.tasks.v1.ListPidsResponse") - proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.services.tasks.v1.CheckpointTaskRequest") - proto.RegisterType((*CheckpointTaskResponse)(nil), "containerd.services.tasks.v1.CheckpointTaskResponse") - proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.services.tasks.v1.UpdateTaskRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry") - proto.RegisterType((*MetricsRequest)(nil), "containerd.services.tasks.v1.MetricsRequest") - proto.RegisterType((*MetricsResponse)(nil), "containerd.services.tasks.v1.MetricsResponse") - proto.RegisterType((*WaitRequest)(nil), "containerd.services.tasks.v1.WaitRequest") - proto.RegisterType((*WaitResponse)(nil), "containerd.services.tasks.v1.WaitResponse") -} - -func init() { - 
proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptor_310e7127b8a26f14) -} - -var fileDescriptor_310e7127b8a26f14 = []byte{ - // 1400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45, - 0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xd3, 0xe5, 0xc5, 0x04, - 0xba, 0xa6, 0x2e, 0xaa, 0xaa, 0xb6, 0xaa, 0xc8, 0x8d, 0xc8, 0x82, 0xaa, 0xe9, 0xb6, 0x40, 0x55, - 0x09, 0x99, 0x8d, 0x77, 0x62, 0x8f, 0x62, 0xef, 0x6c, 0x77, 0xc6, 0x69, 0xcd, 0x0b, 0xfc, 0x84, - 0xbe, 0xf2, 0x02, 0x7f, 0xa7, 0x8f, 0x3c, 0x22, 0x54, 0x05, 0xea, 0x57, 0x7e, 0x01, 0x6f, 0x68, - 0x2e, 0xbb, 0xde, 0xd8, 0xf1, 0x25, 0x4d, 0xc3, 0x4b, 0x3b, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf, - 0xb9, 0x7d, 0x0e, 0x6c, 0x34, 0x31, 0x6b, 0x75, 0xf7, 0xac, 0x06, 0xe9, 0x54, 0x1a, 0xc4, 0x63, - 0x0e, 0xf6, 0x50, 0xe0, 0xc6, 0x97, 0x8e, 0x8f, 0x2b, 0x14, 0x05, 0x87, 0xb8, 0x81, 0x68, 0x85, - 0x39, 0xf4, 0x80, 0x56, 0x0e, 0xaf, 0xcb, 0x85, 0xe5, 0x07, 0x84, 0x11, 0xfd, 0xca, 0x40, 0xda, - 0x0a, 0x25, 0x2d, 0x29, 0x70, 0x78, 0xdd, 0xf8, 0xb0, 0x49, 0x48, 0xb3, 0x8d, 0x2a, 0x42, 0x76, - 0xaf, 0xbb, 0x5f, 0x41, 0x1d, 0x9f, 0xf5, 0xa4, 0xaa, 0xf1, 0xc1, 0xf0, 0x47, 0xc7, 0x0b, 0x3f, - 0x2d, 0x35, 0x49, 0x93, 0x88, 0x65, 0x85, 0xaf, 0xd4, 0xe9, 0xcd, 0x99, 0xfc, 0x65, 0x3d, 0x1f, - 0xd1, 0x4a, 0x87, 0x74, 0x3d, 0xa6, 0xf4, 0x6e, 0x9d, 0x46, 0x0f, 0xb1, 0x00, 0x37, 0xd4, 0xeb, - 0x8c, 0x3b, 0xa7, 0xd0, 0x74, 0x11, 0x6d, 0x04, 0xd8, 0x67, 0x24, 0x50, 0xca, 0xb7, 0x4f, 0xa1, - 0xcc, 0x11, 0x13, 0xff, 0x28, 0xdd, 0xd2, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97, - 0x02, 0xe6, 0x3f, 0x09, 0x58, 0xdc, 0x0c, 0x90, 0xc3, 0xd0, 0x63, 0x87, 0x1e, 0xd8, 0xe8, 0x59, - 0x17, 0x51, 0xa6, 0x57, 0x61, 0x2e, 0x32, 0x5f, 0xc7, 0x6e, 0x51, 0x5b, 0xd5, 0xca, 0xf9, 0x8d, - 0x4b, 0xfd, 0xa3, 0x52, 0x61, 0x33, 0x3c, 0xaf, 0x6d, 0xd9, 0x85, 0x48, 0xa8, 0xe6, 0xea, 0x15, - 0xc8, 0x04, 0x84, 0xb0, 0x7d, 0x5a, 0x4c, 0xae, 0x26, 0xcb, 0x85, 0xea, 0xfb, 0x56, 0x2c, 0xa4, - 0xc2, 0x3b, 0xeb, 0x3e, 0x07, 0xd3, 0x56, 0x62, 0xfa, 0x12, 0xa4, 0x29, 0x73, 0xb1, 0x57, 0x4c, - 0x71, 0xeb, 0xb6, 0xdc, 0xe8, 0xcb, 0x90, 0xa1, 0xcc, 0x25, 0x5d, 0x56, 0x4c, 0x8b, 0x63, 0xb5, - 0x53, 0xe7, 0x28, 0x08, 0x8a, 0x99, 0xe8, 0x1c, 0x05, 0x81, 0x6e, 0x40, 0x8e, 0xa1, 0xa0, 0x83, - 0x3d, 0xa7, 0x5d, 0xcc, 0xae, 0x6a, 0xe5, 0x9c, 0x1d, 0xed, 0xf5, 0xbb, 0x00, 0x8d, 0x16, 0x6a, - 0x1c, 0xf8, 0x04, 0x7b, 0xac, 0x98, 0x5b, 0xd5, 0xca, 0x85, 0xea, 0x95, 0x51, 0xb7, 0xb6, 0x22, - 0xc4, 0xed, 0x98, 0xbc, 0x6e, 0x41, 0x96, 0xf8, 0x0c, 0x13, 0x8f, 0x16, 0xf3, 0x42, 0x75, 0xc9, - 0x92, 0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xba, 0xd7, 0xb3, 0x43, 0x21, 0xfd, 0x2a, 0xcc, 0x05, 0x5d, - 0x8f, 0x03, 0x5c, 0xf7, 0x1d, 0xd6, 0x2a, 0x82, 0xf0, 0xb3, 0xa0, 0xce, 0x76, 0x1d, 0xd6, 0x32, - 0x9f, 0x82, 0x1e, 0x07, 0x9b, 0xfa, 0xc4, 0xa3, 0xe8, 0xad, 0xd0, 0x5e, 0x80, 0xa4, 0x8f, 0xdd, - 0x62, 0x62, 0x55, 0x2b, 0xcf, 0xdb, 0x7c, 0x69, 0x36, 0x61, 0xee, 0x11, 0x73, 0x02, 0x76, 0x96, - 0x18, 0x7e, 0x04, 0x59, 0xf4, 0x02, 0x35, 0xea, 0xca, 0x72, 0x7e, 0x03, 0xfa, 0x47, 0xa5, 0xcc, - 0xf6, 0x0b, 0xd4, 0xa8, 0x6d, 0xd9, 0x19, 0xfe, 0xa9, 0xe6, 0x9a, 0x57, 0x61, 0x5e, 0x5d, 0xa4, - 0xfc, 0x57, 0xbe, 0x68, 0x03, 0x5f, 0x76, 0x60, 0x71, 0x0b, 0xb5, 0xd1, 0x99, 0x93, 0xca, 0xfc, - 0x55, 0x83, 0x8b, 0xd2, 0x52, 0x74, 0xdb, 0x32, 0x24, 0x22, 0xe5, 0x4c, 0xff, 0xa8, 0x94, 0xa8, - 0x6d, 0xd9, 0x09, 0x7c, 0x02, 0x22, 0x7a, 0x09, 0x0a, 0xe8, 0x05, 0x66, 0x75, 0xca, 0x1c, 0xd6, 
- 0xe5, 0x69, 0xc9, 0xbf, 0x00, 0x3f, 0x7a, 0x24, 0x4e, 0xf4, 0x75, 0xc8, 0xf3, 0x1d, 0x72, 0xeb, - 0x0e, 0x13, 0x59, 0x58, 0xa8, 0x1a, 0x23, 0x31, 0x7e, 0x1c, 0x56, 0xcc, 0x46, 0xee, 0xd5, 0x51, - 0xe9, 0xc2, 0xcb, 0xbf, 0x4a, 0x9a, 0x9d, 0x93, 0x6a, 0xeb, 0xcc, 0x24, 0xb0, 0x24, 0xfd, 0xdb, - 0x0d, 0x48, 0x03, 0x51, 0x7a, 0xee, 0xe8, 0x23, 0x80, 0x1d, 0x74, 0xfe, 0x41, 0xde, 0x86, 0x82, - 0xb8, 0x46, 0x81, 0x7e, 0x13, 0xb2, 0xbe, 0x7c, 0xa0, 0xb8, 0x62, 0xa8, 0x8c, 0x0e, 0xaf, 0xab, - 0x4a, 0x0a, 0x41, 0x08, 0x85, 0xcd, 0x35, 0x58, 0xf8, 0x1a, 0x53, 0xc6, 0xd3, 0x20, 0x82, 0x66, - 0x19, 0x32, 0xfb, 0xb8, 0xcd, 0x50, 0x20, 0xbd, 0xb5, 0xd5, 0x8e, 0x27, 0x4d, 0x4c, 0x36, 0xaa, - 0x8d, 0xb4, 0x98, 0x02, 0x45, 0x4d, 0x34, 0x95, 0xc9, 0xd7, 0x4a, 0x51, 0xf3, 0xa5, 0x06, 0x85, - 0xaf, 0x70, 0xbb, 0x7d, 0xde, 0x20, 0x89, 0x9e, 0x84, 0x9b, 0xbc, 0xf3, 0xc8, 0xdc, 0x52, 0x3b, - 0x9e, 0x8a, 0x4e, 0xbb, 0x2d, 0x32, 0x2a, 0x67, 0xf3, 0xa5, 0xf9, 0xaf, 0x06, 0x3a, 0x57, 0x7e, - 0x07, 0x59, 0x12, 0xb5, 0xcd, 0xc4, 0xc9, 0x6d, 0x33, 0x39, 0xa6, 0x6d, 0xa6, 0xc6, 0xb6, 0xcd, - 0xf4, 0x50, 0xdb, 0x2c, 0x43, 0x8a, 0xfa, 0xa8, 0x21, 0x1a, 0xed, 0xb8, 0xae, 0x27, 0x24, 0xe2, - 0x28, 0x65, 0xc7, 0xa6, 0xd2, 0x65, 0x78, 0xef, 0xd8, 0xd3, 0x65, 0x64, 0xcd, 0x5f, 0x34, 0x58, - 0xb0, 0x11, 0xc5, 0x3f, 0xa2, 0x5d, 0xd6, 0x3b, 0xf7, 0x50, 0x2d, 0x41, 0xfa, 0x39, 0x76, 0x59, - 0x4b, 0x45, 0x4a, 0x6e, 0x38, 0x3a, 0x2d, 0x84, 0x9b, 0x2d, 0x59, 0xfd, 0xf3, 0xb6, 0xda, 0x99, - 0x3f, 0xc1, 0xc5, 0xcd, 0x36, 0xa1, 0xa8, 0xf6, 0xe0, 0xff, 0x70, 0x4c, 0x86, 0x33, 0x29, 0xa2, - 0x20, 0x37, 0xe6, 0x97, 0xb0, 0xb0, 0xeb, 0x74, 0xe9, 0x99, 0xfb, 0xe7, 0x0e, 0x2c, 0xda, 0x88, - 0x76, 0x3b, 0x67, 0x36, 0xb4, 0x0d, 0x97, 0x78, 0x71, 0xee, 0x62, 0xf7, 0x2c, 0xc9, 0x6b, 0xda, - 0xb2, 0x1f, 0x48, 0x33, 0xaa, 0xc4, 0xef, 0x41, 0x5e, 0xb5, 0x0b, 0x14, 0x96, 0xf9, 0xea, 0xa4, - 0x32, 0xaf, 0x79, 0xfb, 0xc4, 0x1e, 0xa8, 0x98, 0xaf, 0x35, 0xb8, 0xbc, 0x19, 0x8d, 0xed, 0xb3, - 0xd2, 0x98, 0x3a, 0x2c, 0xfa, 0x4e, 0x80, 0x3c, 0x56, 0x8f, 0x51, 0x07, 0x19, 0xbe, 0x2a, 0xef, - 0xff, 0x7f, 0x1e, 0x95, 0xd6, 0x62, 0x84, 0x8c, 0xf8, 0xc8, 0x8b, 0xd4, 0x69, 0xa5, 0x49, 0xae, - 0xb9, 0xb8, 0x89, 0x28, 0xb3, 0xb6, 0xc4, 0x7f, 0xf6, 0x82, 0x34, 0xb6, 0x79, 0x22, 0xad, 0x48, - 0xce, 0x40, 0x2b, 0xcc, 0x27, 0xb0, 0x3c, 0xfc, 0xba, 0x08, 0xb8, 0xc2, 0x80, 0x2c, 0x9e, 0xd8, - 0x21, 0x47, 0xf8, 0x4d, 0x5c, 0xc1, 0xfc, 0x2d, 0x01, 0x8b, 0xdf, 0xf8, 0xee, 0x3b, 0xe0, 0x7e, - 0x55, 0xc8, 0x07, 0x88, 0x92, 0x6e, 0xd0, 0x40, 0x54, 0x80, 0x35, 0xee, 0x55, 0x03, 0x31, 0x7d, - 0x0f, 0x0a, 0x8e, 0xe7, 0x11, 0xe6, 0x84, 0x58, 0x70, 0xef, 0xbf, 0xb0, 0x26, 0xfd, 0x0e, 0xb0, - 0x46, 0xbc, 0xb5, 0xd6, 0x07, 0x26, 0xb6, 0x3d, 0x16, 0xf4, 0xec, 0xb8, 0x51, 0xe3, 0x1e, 0x2c, - 0x0c, 0x0b, 0xf0, 0xe6, 0x7c, 0x80, 0x7a, 0x6a, 0xf6, 0xf0, 0x25, 0x2f, 0xc1, 0x43, 0xa7, 0xdd, - 0x45, 0x61, 0x47, 0x15, 0x9b, 0xdb, 0x89, 0x5b, 0x9a, 0xb9, 0x06, 0x17, 0xef, 0x4b, 0x22, 0x1f, - 0xa2, 0x53, 0x84, 0xac, 0x1c, 0x57, 0x12, 0xef, 0xbc, 0x1d, 0x6e, 0x79, 0x85, 0x44, 0xb2, 0xd1, - 0xf0, 0xca, 0xaa, 0xdf, 0x01, 0x2a, 0x38, 0xc5, 0x13, 0x38, 0xb1, 0x10, 0xb0, 0x43, 0x41, 0x73, - 0x1f, 0x0a, 0xdf, 0x39, 0xf8, 0xfc, 0x07, 0x7c, 0x00, 0x73, 0xf2, 0x1e, 0xe5, 0xeb, 0x10, 0x59, - 0xd2, 0x26, 0x93, 0xa5, 0xc4, 0xdb, 0x90, 0xa5, 0xea, 0xeb, 0x39, 0x48, 0x8b, 0xf1, 0xae, 0x1f, - 0x40, 0x46, 0x12, 0x61, 0xbd, 0x32, 0x39, 0xe2, 0x23, 0xbf, 0x4d, 0x8c, 0xcf, 0x66, 0x57, 0x50, - 0x4f, 0xfb, 0x01, 0xd2, 0x82, 0xb0, 0xea, 0x6b, 0x93, 0x55, 0xe3, 0xf4, 0xd9, 0xf8, 0x64, 0x26, - 0x59, 0x75, 0x43, 0x13, 
0x32, 0x92, 0x05, 0x4e, 0x7b, 0xce, 0x08, 0x2b, 0x36, 0x3e, 0x9d, 0x45, - 0x21, 0xba, 0xe8, 0x19, 0xcc, 0x1f, 0xa3, 0x9b, 0x7a, 0x75, 0x16, 0xf5, 0xe3, 0xac, 0xe3, 0x94, - 0x57, 0x3e, 0x85, 0xe4, 0x0e, 0x62, 0x7a, 0x79, 0xb2, 0xd2, 0x80, 0x93, 0x1a, 0x1f, 0xcf, 0x20, - 0x19, 0xe1, 0x96, 0xe2, 0xe3, 0x40, 0xb7, 0x26, 0xab, 0x0c, 0x53, 0x48, 0xa3, 0x32, 0xb3, 0xbc, - 0xba, 0xa8, 0x06, 0x29, 0xce, 0x08, 0xf5, 0x29, 0xbe, 0xc5, 0x58, 0xa3, 0xb1, 0x3c, 0x92, 0xdc, - 0xdb, 0x1d, 0x9f, 0xf5, 0xf4, 0x5d, 0x48, 0xf1, 0x52, 0xd2, 0xa7, 0xe4, 0xe1, 0x28, 0xdb, 0x1b, - 0x6b, 0xf1, 0x11, 0xe4, 0x23, 0x22, 0x34, 0x0d, 0x8a, 0x61, 0xc6, 0x34, 0xd6, 0xe8, 0x03, 0xc8, - 0x2a, 0x0a, 0xa3, 0x4f, 0x89, 0xf7, 0x71, 0xa6, 0x33, 0xc1, 0x60, 0x5a, 0x50, 0x92, 0x69, 0x1e, - 0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x42, 0x46, 0x72, 0x93, 0x69, 0x45, 0x33, 0xc2, 0x60, 0xc6, - 0x9a, 0xc4, 0x90, 0x0b, 0xe9, 0x85, 0x7e, 0x6d, 0x7a, 0x8e, 0xc4, 0xd8, 0x8c, 0x61, 0xcd, 0x2a, - 0xae, 0x32, 0xea, 0x39, 0x40, 0x6c, 0xa8, 0xdf, 0x98, 0x02, 0xf1, 0x49, 0xf4, 0xc4, 0xf8, 0xfc, - 0x74, 0x4a, 0xea, 0xe2, 0x87, 0x90, 0x91, 0x63, 0x70, 0x1a, 0x6c, 0x23, 0xc3, 0x72, 0x2c, 0x6c, - 0xfb, 0x90, 0x55, 0xa3, 0x6b, 0x5a, 0xae, 0x1c, 0x9f, 0x86, 0xc6, 0xb5, 0x19, 0xa5, 0x95, 0xeb, - 0xdf, 0x43, 0x8a, 0xcf, 0x9c, 0x69, 0x55, 0x18, 0x9b, 0x7f, 0xc6, 0xda, 0x2c, 0xa2, 0xd2, 0xfc, - 0xc6, 0xb7, 0xaf, 0xde, 0xac, 0x5c, 0xf8, 0xe3, 0xcd, 0xca, 0x85, 0x9f, 0xfb, 0x2b, 0xda, 0xab, - 0xfe, 0x8a, 0xf6, 0x7b, 0x7f, 0x45, 0xfb, 0xbb, 0xbf, 0xa2, 0x3d, 0xbd, 0xfb, 0x76, 0x7f, 0xa1, - 0xbc, 0x23, 0x16, 0x4f, 0x12, 0x7b, 0x19, 0x01, 0xd8, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, - 0xc7, 0x3c, 0xaa, 0x56, 0xea, 0x14, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TasksClient is the client API for Tasks service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TasksClient interface { - // Create a task. - Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) - // Start a process. - Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) - // Delete a task and on disk state. - Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) - // Kill a task or process. 
- Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error) - ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error) - CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error) - Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) - Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) - Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) - Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) -} - -type tasksClient struct { - cc *grpc.ClientConn -} - -func NewTasksClient(cc *grpc.ClientConn) TasksClient { - return &tasksClient{cc} -} - -func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { - out := new(CreateTaskResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { - out := new(StartResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { - out := new(ListTasksResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { - out := new(ListPidsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) { - out := new(CheckpointTaskResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) { - out := new(MetricsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { - out := new(WaitResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// TasksServer is the server API for Tasks service. -type TasksServer interface { - // Create a task. - Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) - // Start a process. - Start(context.Context, *StartRequest) (*StartResponse, error) - // Delete a task and on disk state. 
- Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) - DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) - Get(context.Context, *GetRequest) (*GetResponse, error) - List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) - // Kill a task or process. - Kill(context.Context, *KillRequest) (*types1.Empty, error) - Exec(context.Context, *ExecProcessRequest) (*types1.Empty, error) - ResizePty(context.Context, *ResizePtyRequest) (*types1.Empty, error) - CloseIO(context.Context, *CloseIORequest) (*types1.Empty, error) - Pause(context.Context, *PauseTaskRequest) (*types1.Empty, error) - Resume(context.Context, *ResumeTaskRequest) (*types1.Empty, error) - ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) - Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) - Update(context.Context, *UpdateTaskRequest) (*types1.Empty, error) - Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) - Wait(context.Context, *WaitRequest) (*WaitResponse, error) -} - -// UnimplementedTasksServer can be embedded to have forward compatible implementations. -type UnimplementedTasksServer struct { -} - -func (*UnimplementedTasksServer) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedTasksServer) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") -} -func (*UnimplementedTasksServer) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedTasksServer) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProcess not implemented") -} -func (*UnimplementedTasksServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedTasksServer) List(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedTasksServer) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Kill not implemented") -} -func (*UnimplementedTasksServer) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented") -} -func (*UnimplementedTasksServer) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResizePty not implemented") -} -func (*UnimplementedTasksServer) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseIO not implemented") -} -func (*UnimplementedTasksServer) Pause(ctx context.Context, req *PauseTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented") -} -func (*UnimplementedTasksServer) Resume(ctx context.Context, req *ResumeTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Resume not 
implemented") -} -func (*UnimplementedTasksServer) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPids not implemented") -} -func (*UnimplementedTasksServer) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Checkpoint not implemented") -} -func (*UnimplementedTasksServer) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedTasksServer) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") -} -func (*UnimplementedTasksServer) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") -} - -func RegisterTasksServer(s *grpc.Server, srv TasksServer) { - s.RegisterService(&_Tasks_serviceDesc, srv) -} - -func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Start(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Start", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Start(ctx, req.(*StartRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).DeleteProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTasksRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).List(ctx, req.(*ListTasksRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KillRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Kill(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Kill", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Kill(ctx, req.(*KillRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExecProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Exec(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Exec", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResizePtyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).ResizePty(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CloseIORequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).CloseIO(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PauseTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Pause(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Pause", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResumeTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Resume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Resume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).ListPids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckpointTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Checkpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Metrics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WaitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Wait(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Wait", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Tasks_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.tasks.v1.Tasks", - HandlerType: (*TasksServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _Tasks_Create_Handler, - }, - { - MethodName: "Start", - Handler: _Tasks_Start_Handler, - }, - { - MethodName: "Delete", - Handler: _Tasks_Delete_Handler, - }, - { - MethodName: "DeleteProcess", - Handler: _Tasks_DeleteProcess_Handler, - }, - { - MethodName: "Get", - Handler: _Tasks_Get_Handler, - }, - { - MethodName: "List", - Handler: _Tasks_List_Handler, - }, - { - MethodName: "Kill", - Handler: _Tasks_Kill_Handler, - }, - { - MethodName: "Exec", - Handler: _Tasks_Exec_Handler, - }, - { - MethodName: "ResizePty", - Handler: _Tasks_ResizePty_Handler, - }, - { - MethodName: "CloseIO", - Handler: _Tasks_CloseIO_Handler, - }, - { - MethodName: "Pause", - Handler: _Tasks_Pause_Handler, - }, - { - MethodName: "Resume", - Handler: _Tasks_Resume_Handler, - }, - { - MethodName: "ListPids", - Handler: _Tasks_ListPids_Handler, - }, - { - MethodName: "Checkpoint", - Handler: _Tasks_Checkpoint_Handler, - }, - { - MethodName: "Update", - Handler: _Tasks_Update_Handler, - }, - { - MethodName: "Metrics", - Handler: _Tasks_Metrics_Handler, - }, - { - MethodName: "Wait", - Handler: _Tasks_Wait_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", -} - -func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RuntimePath) > 0 { - i -= len(m.RuntimePath) - copy(dAtA[i:], m.RuntimePath) - i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath))) - i-- - dAtA[i] = 0x52 - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x4a - } - if m.Checkpoint != nil { - { - size, err := m.Checkpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x2a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintTasks(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x22 - if m.ExitStatus != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x18 - } - if m.Pid != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { - size 
:= m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Process != nil { - { - size, err := m.Process.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListTasksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListTasksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Tasks) > 0 { - for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KillRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Signal != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Signal)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x3a - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x22 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x1a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := 
len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Height != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Width != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Width)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stdin { - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PauseTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PauseTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResumeTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResumeTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } 
- return dAtA[:n], nil -} - -func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListPidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListPidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Processes) > 0 { - for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.ParentCheckpoint) > 0 { - i -= len(m.ParentCheckpoint) - copy(dAtA[i:], m.ParentCheckpoint) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Descriptors) > 0 { - for iNdEx := len(m.Descriptors) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Descriptors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintTasks(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintTasks(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintTasks(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if m.Resources != nil { - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MetricsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WaitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitRequest) MarshalTo(dAtA []byte) (int, 
error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WaitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintTasks(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x12 - if m.ExitStatus != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTasks(dAtA []byte, offset int, v uint64) int { - offset -= sovTasks(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CreateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.Checkpoint != nil { - l = m.Checkpoint.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.RuntimePath) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovTasks(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTasks(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Process != nil { - l = m.Process.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListTasksRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListTasksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Tasks) > 0 { - for _, e := range m.Tasks { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *KillRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Signal != 0 { - n += 1 + sovTasks(uint64(m.Signal)) - } - if m.All { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResizePtyRequest) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Width != 0 { - n += 1 + sovTasks(uint64(m.Width)) - } - if m.Height != 0 { - n += 1 + sovTasks(uint64(m.Height)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CloseIORequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Stdin { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PauseTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResumeTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListPidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListPidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Processes) > 0 { - for _, e := range m.Processes { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ParentCheckpoint) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Descriptors) > 0 { - for _, e := range m.Descriptors { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Resources != nil { - l = m.Resources.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTasks(uint64(len(k))) + 1 + len(v) + sovTasks(uint64(len(v))) - n += mapEntrySize + 1 + sovTasks(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricsResponse) Size() (n int) { - if m == nil { - return 0 - } - var 
l int - _ = l - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sovTasks(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTasks(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{26} } -func sovTasks(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTasks(x uint64) (n int) { - return sovTasks(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CreateTaskRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRootfs += "}" - s := strings.Join([]string{`&CreateTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateTaskResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateTaskResponse{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResponse) String() string { - if this == nil { 
- return "nil" - } - s := strings.Join([]string{`&DeleteResponse{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteProcessRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetResponse{`, - `Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "task.Process", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListTasksRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListTasksRequest{`, - `Filter:` + fmt.Sprintf("%v", this.Filter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListTasksResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTasks := "[]*Process{" - for _, f := range this.Tasks { - repeatedStringForTasks += strings.Replace(fmt.Sprintf("%v", f), "Process", "task.Process", 1) + "," - } - repeatedStringForTasks += "}" - s := strings.Join([]string{`&ListTasksResponse{`, - `Tasks:` + repeatedStringForTasks + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *KillRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KillRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `All:` + fmt.Sprintf("%v", this.All) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessResponse{`, - `XXX_unrecognized:` + 
fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResizePtyRequest) String() string { - if this == nil { - return "nil" +func (x *WaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus } - s := strings.Join([]string{`&ResizePtyRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Width:` + fmt.Sprintf("%v", this.Width) + `,`, - `Height:` + fmt.Sprintf("%v", this.Height) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return 0 } -func (this *CloseIORequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CloseIORequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PauseTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PauseTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResumeTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResumeTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListPidsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListPidsRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListPidsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForProcesses := "[]*ProcessInfo{" - for _, f := range this.Processes { - repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + "," - } - repeatedStringForProcesses += "}" - s := strings.Join([]string{`&ListPidsResponse{`, - `Processes:` + repeatedStringForProcesses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForDescriptors := "[]*Descriptor{" - for _, f := range this.Descriptors { - repeatedStringForDescriptors += strings.Replace(fmt.Sprintf("%v", f), "Descriptor", "types.Descriptor", 1) + "," - } - repeatedStringForDescriptors += "}" - s := strings.Join([]string{`&CheckpointTaskResponse{`, - `Descriptors:` + repeatedStringForDescriptors + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateTaskRequest) String() string { - if 
this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&UpdateTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricsRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetrics := "[]*Metric{" - for _, f := range this.Metrics { - repeatedStringForMetrics += strings.Replace(fmt.Sprintf("%v", f), "Metric", "types.Metric", 1) + "," - } - repeatedStringForMetrics += "}" - s := strings.Join([]string{`&MetricsResponse{`, - `Metrics:` + repeatedStringForMetrics + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitResponse{`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTasks(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - 
case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Checkpoint == nil { - m.Checkpoint = &types.Descriptor{} - } - if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *WaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt } return nil } -func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Process == nil { - m.Process = &task.Process{} - } - if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tasks = append(m.Tasks, &task.Process{}) - if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KillRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &types1.Any{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) - } - m.Width = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Width |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloseIORequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PauseTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PauseTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PauseTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResumeTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResumeTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResumeTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListPidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListPidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Processes = append(m.Processes, &task.ProcessInfo{}) - if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ParentCheckpoint = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Descriptors", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Descriptors = append(m.Descriptors, &types.Descriptor{}) - if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resources == nil { - m.Resources = &types1.Any{} - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTasks - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthTasks - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTasks - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthTasks - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &types.Metric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTasks(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTasks - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTasks - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTasks - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, + 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x22, 0x49, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x4a, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 
0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x36, 0x0a, + 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x22, 0x52, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x4c, 0x69, 0x73, + 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x47, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x50, 
0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x73, + 0x0a, 0x0b, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, + 0x61, 0x6c, 0x6c, 0x22, 0xdc, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, + 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, + 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x10, 0x52, 0x65, 0x73, + 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, + 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x62, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x73, 0x65, + 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, + 0x78, 0x65, 0x63, 0x49, 0x64, 
0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x22, 0x35, 0x0a, 0x10, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x22, 0x36, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x34, 0x0a, 0x0f, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x22, 0x52, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x22, 0x97, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2e, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x58, + 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x8e, 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x45, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x49, 0x0a, 0x0b, + 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x57, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, + 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x32, 0xdc, 0x0c, 0x0a, 0x05, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x6b, 0x0a, 0x06, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 
0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x06, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x28, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x4b, + 0x69, 0x6c, 0x6c, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x50, 0x0a, 0x04, 0x45, 0x78, 0x65, 0x63, 0x12, 0x30, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x50, 0x74, 0x79, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, + 0x07, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, + 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x75, 0x73, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x51, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x69, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x12, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, + 0x0a, 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x07, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5d, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTasks = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTasks = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData = file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP() []byte { + 
file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes = []interface{}{ + (*CreateTaskRequest)(nil), // 0: containerd.services.tasks.v1.CreateTaskRequest + (*CreateTaskResponse)(nil), // 1: containerd.services.tasks.v1.CreateTaskResponse + (*StartRequest)(nil), // 2: containerd.services.tasks.v1.StartRequest + (*StartResponse)(nil), // 3: containerd.services.tasks.v1.StartResponse + (*DeleteTaskRequest)(nil), // 4: containerd.services.tasks.v1.DeleteTaskRequest + (*DeleteResponse)(nil), // 5: containerd.services.tasks.v1.DeleteResponse + (*DeleteProcessRequest)(nil), // 6: containerd.services.tasks.v1.DeleteProcessRequest + (*GetRequest)(nil), // 7: containerd.services.tasks.v1.GetRequest + (*GetResponse)(nil), // 8: containerd.services.tasks.v1.GetResponse + (*ListTasksRequest)(nil), // 9: containerd.services.tasks.v1.ListTasksRequest + (*ListTasksResponse)(nil), // 10: containerd.services.tasks.v1.ListTasksResponse + (*KillRequest)(nil), // 11: containerd.services.tasks.v1.KillRequest + (*ExecProcessRequest)(nil), // 12: containerd.services.tasks.v1.ExecProcessRequest + (*ExecProcessResponse)(nil), // 13: containerd.services.tasks.v1.ExecProcessResponse + (*ResizePtyRequest)(nil), // 14: containerd.services.tasks.v1.ResizePtyRequest + (*CloseIORequest)(nil), // 15: containerd.services.tasks.v1.CloseIORequest + (*PauseTaskRequest)(nil), // 16: containerd.services.tasks.v1.PauseTaskRequest + (*ResumeTaskRequest)(nil), // 17: containerd.services.tasks.v1.ResumeTaskRequest + (*ListPidsRequest)(nil), // 18: containerd.services.tasks.v1.ListPidsRequest + (*ListPidsResponse)(nil), // 19: containerd.services.tasks.v1.ListPidsResponse + (*CheckpointTaskRequest)(nil), // 20: containerd.services.tasks.v1.CheckpointTaskRequest + (*CheckpointTaskResponse)(nil), // 21: containerd.services.tasks.v1.CheckpointTaskResponse + (*UpdateTaskRequest)(nil), // 22: containerd.services.tasks.v1.UpdateTaskRequest + (*MetricsRequest)(nil), // 23: containerd.services.tasks.v1.MetricsRequest + (*MetricsResponse)(nil), // 24: containerd.services.tasks.v1.MetricsResponse + (*WaitRequest)(nil), // 25: containerd.services.tasks.v1.WaitRequest + (*WaitResponse)(nil), // 26: containerd.services.tasks.v1.WaitResponse + nil, // 27: containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry + (*types.Mount)(nil), // 28: containerd.types.Mount + (*types.Descriptor)(nil), // 29: containerd.types.Descriptor + (*anypb.Any)(nil), // 30: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 31: google.protobuf.Timestamp + (*task.Process)(nil), // 32: containerd.v1.types.Process + (*task.ProcessInfo)(nil), // 33: containerd.v1.types.ProcessInfo + (*types.Metric)(nil), // 34: containerd.types.Metric + (*emptypb.Empty)(nil), // 35: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs = []int32{ + 28, // 0: containerd.services.tasks.v1.CreateTaskRequest.rootfs:type_name -> containerd.types.Mount + 29, // 1: 
containerd.services.tasks.v1.CreateTaskRequest.checkpoint:type_name -> containerd.types.Descriptor + 30, // 2: containerd.services.tasks.v1.CreateTaskRequest.options:type_name -> google.protobuf.Any + 31, // 3: containerd.services.tasks.v1.DeleteResponse.exited_at:type_name -> google.protobuf.Timestamp + 32, // 4: containerd.services.tasks.v1.GetResponse.process:type_name -> containerd.v1.types.Process + 32, // 5: containerd.services.tasks.v1.ListTasksResponse.tasks:type_name -> containerd.v1.types.Process + 30, // 6: containerd.services.tasks.v1.ExecProcessRequest.spec:type_name -> google.protobuf.Any + 33, // 7: containerd.services.tasks.v1.ListPidsResponse.processes:type_name -> containerd.v1.types.ProcessInfo + 30, // 8: containerd.services.tasks.v1.CheckpointTaskRequest.options:type_name -> google.protobuf.Any + 29, // 9: containerd.services.tasks.v1.CheckpointTaskResponse.descriptors:type_name -> containerd.types.Descriptor + 30, // 10: containerd.services.tasks.v1.UpdateTaskRequest.resources:type_name -> google.protobuf.Any + 27, // 11: containerd.services.tasks.v1.UpdateTaskRequest.annotations:type_name -> containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry + 34, // 12: containerd.services.tasks.v1.MetricsResponse.metrics:type_name -> containerd.types.Metric + 31, // 13: containerd.services.tasks.v1.WaitResponse.exited_at:type_name -> google.protobuf.Timestamp + 0, // 14: containerd.services.tasks.v1.Tasks.Create:input_type -> containerd.services.tasks.v1.CreateTaskRequest + 2, // 15: containerd.services.tasks.v1.Tasks.Start:input_type -> containerd.services.tasks.v1.StartRequest + 4, // 16: containerd.services.tasks.v1.Tasks.Delete:input_type -> containerd.services.tasks.v1.DeleteTaskRequest + 6, // 17: containerd.services.tasks.v1.Tasks.DeleteProcess:input_type -> containerd.services.tasks.v1.DeleteProcessRequest + 7, // 18: containerd.services.tasks.v1.Tasks.Get:input_type -> containerd.services.tasks.v1.GetRequest + 9, // 19: containerd.services.tasks.v1.Tasks.List:input_type -> containerd.services.tasks.v1.ListTasksRequest + 11, // 20: containerd.services.tasks.v1.Tasks.Kill:input_type -> containerd.services.tasks.v1.KillRequest + 12, // 21: containerd.services.tasks.v1.Tasks.Exec:input_type -> containerd.services.tasks.v1.ExecProcessRequest + 14, // 22: containerd.services.tasks.v1.Tasks.ResizePty:input_type -> containerd.services.tasks.v1.ResizePtyRequest + 15, // 23: containerd.services.tasks.v1.Tasks.CloseIO:input_type -> containerd.services.tasks.v1.CloseIORequest + 16, // 24: containerd.services.tasks.v1.Tasks.Pause:input_type -> containerd.services.tasks.v1.PauseTaskRequest + 17, // 25: containerd.services.tasks.v1.Tasks.Resume:input_type -> containerd.services.tasks.v1.ResumeTaskRequest + 18, // 26: containerd.services.tasks.v1.Tasks.ListPids:input_type -> containerd.services.tasks.v1.ListPidsRequest + 20, // 27: containerd.services.tasks.v1.Tasks.Checkpoint:input_type -> containerd.services.tasks.v1.CheckpointTaskRequest + 22, // 28: containerd.services.tasks.v1.Tasks.Update:input_type -> containerd.services.tasks.v1.UpdateTaskRequest + 23, // 29: containerd.services.tasks.v1.Tasks.Metrics:input_type -> containerd.services.tasks.v1.MetricsRequest + 25, // 30: containerd.services.tasks.v1.Tasks.Wait:input_type -> containerd.services.tasks.v1.WaitRequest + 1, // 31: containerd.services.tasks.v1.Tasks.Create:output_type -> containerd.services.tasks.v1.CreateTaskResponse + 3, // 32: containerd.services.tasks.v1.Tasks.Start:output_type -> 
containerd.services.tasks.v1.StartResponse + 5, // 33: containerd.services.tasks.v1.Tasks.Delete:output_type -> containerd.services.tasks.v1.DeleteResponse + 5, // 34: containerd.services.tasks.v1.Tasks.DeleteProcess:output_type -> containerd.services.tasks.v1.DeleteResponse + 8, // 35: containerd.services.tasks.v1.Tasks.Get:output_type -> containerd.services.tasks.v1.GetResponse + 10, // 36: containerd.services.tasks.v1.Tasks.List:output_type -> containerd.services.tasks.v1.ListTasksResponse + 35, // 37: containerd.services.tasks.v1.Tasks.Kill:output_type -> google.protobuf.Empty + 35, // 38: containerd.services.tasks.v1.Tasks.Exec:output_type -> google.protobuf.Empty + 35, // 39: containerd.services.tasks.v1.Tasks.ResizePty:output_type -> google.protobuf.Empty + 35, // 40: containerd.services.tasks.v1.Tasks.CloseIO:output_type -> google.protobuf.Empty + 35, // 41: containerd.services.tasks.v1.Tasks.Pause:output_type -> google.protobuf.Empty + 35, // 42: containerd.services.tasks.v1.Tasks.Resume:output_type -> google.protobuf.Empty + 19, // 43: containerd.services.tasks.v1.Tasks.ListPids:output_type -> containerd.services.tasks.v1.ListPidsResponse + 21, // 44: containerd.services.tasks.v1.Tasks.Checkpoint:output_type -> containerd.services.tasks.v1.CheckpointTaskResponse + 35, // 45: containerd.services.tasks.v1.Tasks.Update:output_type -> google.protobuf.Empty + 24, // 46: containerd.services.tasks.v1.Tasks.Metrics:output_type -> containerd.services.tasks.v1.MetricsResponse + 26, // 47: containerd.services.tasks.v1.Tasks.Wait:output_type -> containerd.services.tasks.v1.WaitResponse + 31, // [31:48] is the sub-list for method output_type + 14, // [14:31] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_init() } +func file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_init() { + if File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*DeleteTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTasksRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTasksResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResizePtyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseIORequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PauseTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumeTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPidsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPidsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc, + NumEnums: 0, + NumMessages: 28, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto = out.File + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto index 6299c76026..8ddd319260 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto @@ -20,7 +20,6 @@ package containerd.services.tasks.v1; import "google/protobuf/empty.proto"; import "google/protobuf/any.proto"; -import weak "gogoproto/gogo.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/metrics.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -114,7 +113,7 @@ message DeleteResponse { string id = 1; uint32 pid = 2; uint32 exit_status = 3; - google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 4; } message DeleteProcessRequest { @@ -195,7 +194,7 @@ message ListPidsResponse { message CheckpointTaskRequest { string container_id = 1; - string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string parent_checkpoint = 2; google.protobuf.Any options = 3; } @@ -224,5 +223,5 @@ message WaitRequest { message WaitResponse { uint32 exit_status = 1; - google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 2; } diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go new file mode 100644 index 0000000000..3fd4057e9b --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go @@ -0,0 +1,690 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto + +package tasks + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TasksClient is the client API for Tasks service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TasksClient interface { + // Create a task. + Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) + // Start a process. + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Delete a task and on disk state. + Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + // Kill a task or process. + Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) + Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) + Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) + Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) +} + +type tasksClient struct { + cc grpc.ClientConnInterface +} + +func NewTasksClient(cc grpc.ClientConnInterface) TasksClient { + return &tasksClient{cc} +} + +func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { + out := new(CreateTaskResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { + out := new(ListPidsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) { + out := new(CheckpointTaskResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { + out := new(WaitResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TasksServer is the server API for Tasks service. +// All implementations must embed UnimplementedTasksServer +// for forward compatibility +type TasksServer interface { + // Create a task. + Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + // Start a process. + Start(context.Context, *StartRequest) (*StartResponse, error) + // Delete a task and on disk state. + Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) + DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + // Kill a task or process. + Kill(context.Context, *KillRequest) (*emptypb.Empty, error) + Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) + ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) + CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) + Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error) + Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error) + ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) + Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) + Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) + Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) + Wait(context.Context, *WaitRequest) (*WaitResponse, error) + mustEmbedUnimplementedTasksServer() +} + +// UnimplementedTasksServer must be embedded to have forward compatible implementations. 
+type UnimplementedTasksServer struct { +} + +func (UnimplementedTasksServer) Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedTasksServer) Start(context.Context, *StartRequest) (*StartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (UnimplementedTasksServer) Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedTasksServer) DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProcess not implemented") +} +func (UnimplementedTasksServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedTasksServer) List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedTasksServer) Kill(context.Context, *KillRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Kill not implemented") +} +func (UnimplementedTasksServer) Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented") +} +func (UnimplementedTasksServer) ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResizePty not implemented") +} +func (UnimplementedTasksServer) CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseIO not implemented") +} +func (UnimplementedTasksServer) Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented") +} +func (UnimplementedTasksServer) Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Resume not implemented") +} +func (UnimplementedTasksServer) ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPids not implemented") +} +func (UnimplementedTasksServer) Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Checkpoint not implemented") +} +func (UnimplementedTasksServer) Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedTasksServer) Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") +} +func (UnimplementedTasksServer) Wait(context.Context, *WaitRequest) (*WaitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") +} +func (UnimplementedTasksServer) mustEmbedUnimplementedTasksServer() {} + +// UnsafeTasksServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TasksServer will +// result in compilation errors. 
+type UnsafeTasksServer interface { + mustEmbedUnimplementedTasksServer() +} + +func RegisterTasksServer(s grpc.ServiceRegistrar, srv TasksServer) { + s.RegisterService(&Tasks_ServiceDesc, srv) +} + +func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).DeleteProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTasksRequest) + if err := dec(in); err != 
nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).List(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KillRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Kill(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Kill", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Kill(ctx, req.(*KillRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Exec(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Exec", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResizePtyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ResizePty(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseIORequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).CloseIO(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Pause(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Pause", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Resume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Resume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPidsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ListPids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckpointTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Checkpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Wait(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/containerd.services.tasks.v1.Tasks/Wait", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Tasks_ServiceDesc is the grpc.ServiceDesc for Tasks service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Tasks_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.tasks.v1.Tasks", + HandlerType: (*TasksServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Tasks_Create_Handler, + }, + { + MethodName: "Start", + Handler: _Tasks_Start_Handler, + }, + { + MethodName: "Delete", + Handler: _Tasks_Delete_Handler, + }, + { + MethodName: "DeleteProcess", + Handler: _Tasks_DeleteProcess_Handler, + }, + { + MethodName: "Get", + Handler: _Tasks_Get_Handler, + }, + { + MethodName: "List", + Handler: _Tasks_List_Handler, + }, + { + MethodName: "Kill", + Handler: _Tasks_Kill_Handler, + }, + { + MethodName: "Exec", + Handler: _Tasks_Exec_Handler, + }, + { + MethodName: "ResizePty", + Handler: _Tasks_ResizePty_Handler, + }, + { + MethodName: "CloseIO", + Handler: _Tasks_CloseIO_Handler, + }, + { + MethodName: "Pause", + Handler: _Tasks_Pause_Handler, + }, + { + MethodName: "Resume", + Handler: _Tasks_Resume_Handler, + }, + { + MethodName: "ListPids", + Handler: _Tasks_ListPids_Handler, + }, + { + MethodName: "Checkpoint", + Handler: _Tasks_Checkpoint_Handler, + }, + { + MethodName: "Update", + Handler: _Tasks_Update_Handler, + }, + { + MethodName: "Metrics", + Handler: _Tasks_Metrics_Handler, + }, + { + MethodName: "Wait", + Handler: _Tasks_Wait_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go new file mode 100644 index 0000000000..0882a6e922 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transfer diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go new file mode 100644 index 0000000000..b6e959babd --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go @@ -0,0 +1,274 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransferRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + Destination *anypb.Any `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` + Options *TransferOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *TransferRequest) Reset() { + *x = TransferRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransferRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransferRequest) ProtoMessage() {} + +func (x *TransferRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransferRequest.ProtoReflect.Descriptor instead. 
+func (*TransferRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{0} +} + +func (x *TransferRequest) GetSource() *anypb.Any { + if x != nil { + return x.Source + } + return nil +} + +func (x *TransferRequest) GetDestination() *anypb.Any { + if x != nil { + return x.Destination + } + return nil +} + +func (x *TransferRequest) GetOptions() *TransferOptions { + if x != nil { + return x.Options + } + return nil +} + +type TransferOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProgressStream string `protobuf:"bytes,1,opt,name=progress_stream,json=progressStream,proto3" json:"progress_stream,omitempty"` // Progress min interval +} + +func (x *TransferOptions) Reset() { + *x = TransferOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransferOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransferOptions) ProtoMessage() {} + +func (x *TransferOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransferOptions.ProtoReflect.Descriptor instead. +func (*TransferOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{1} +} + +func (x *TransferOptions) GetProgressStream() string { + if x != nil { + return x.ProgressStream + } + return "" +} + +var File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = []byte{ + 0x0a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, + 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x0f, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x32, 0x60, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x12, 0x54, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x30, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = []interface{}{ + (*TransferRequest)(nil), // 0: containerd.services.transfer.v1.TransferRequest + (*TransferOptions)(nil), // 1: containerd.services.transfer.v1.TransferOptions + (*anypb.Any)(nil), // 2: 
google.protobuf.Any + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = []int32{ + 2, // 0: containerd.services.transfer.v1.TransferRequest.source:type_name -> google.protobuf.Any + 2, // 1: containerd.services.transfer.v1.TransferRequest.destination:type_name -> google.protobuf.Any + 1, // 2: containerd.services.transfer.v1.TransferRequest.options:type_name -> containerd.services.transfer.v1.TransferOptions + 0, // 3: containerd.services.transfer.v1.Transfer.Transfer:input_type -> containerd.services.transfer.v1.TransferRequest + 3, // 4: containerd.services.transfer.v1.Transfer.Transfer:output_type -> google.protobuf.Empty + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() } +func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() { + if File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto = out.File + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto new file mode 100644 index 0000000000..a8f25ee593 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto @@ -0,0 +1,39 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.services.transfer.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/containerd/containerd/api/services/transfer/v1;transfer"; + +service Transfer { + rpc Transfer(TransferRequest) returns (google.protobuf.Empty); +} + +message TransferRequest { + google.protobuf.Any source = 1; + google.protobuf.Any destination = 2; + TransferOptions options = 3; +} + +message TransferOptions { + string progress_stream = 1; + // Progress min interval +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go new file mode 100644 index 0000000000..cf108744bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto + +package transfer + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TransferClient is the client API for Transfer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TransferClient interface { + Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type transferClient struct { + cc grpc.ClientConnInterface +} + +func NewTransferClient(cc grpc.ClientConnInterface) TransferClient { + return &transferClient{cc} +} + +func (c *transferClient) Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.transfer.v1.Transfer/Transfer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TransferServer is the server API for Transfer service. +// All implementations must embed UnimplementedTransferServer +// for forward compatibility +type TransferServer interface { + Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedTransferServer() +} + +// UnimplementedTransferServer must be embedded to have forward compatible implementations. 
+type UnimplementedTransferServer struct { +} + +func (UnimplementedTransferServer) Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented") +} +func (UnimplementedTransferServer) mustEmbedUnimplementedTransferServer() {} + +// UnsafeTransferServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TransferServer will +// result in compilation errors. +type UnsafeTransferServer interface { + mustEmbedUnimplementedTransferServer() +} + +func RegisterTransferServer(s grpc.ServiceRegistrar, srv TransferServer) { + s.RegisterService(&Transfer_ServiceDesc, srv) +} + +func _Transfer_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TransferRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TransferServer).Transfer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.transfer.v1.Transfer/Transfer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TransferServer).Transfer(ctx, req.(*TransferRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Transfer_ServiceDesc is the grpc.ServiceDesc for Transfer service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Transfer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.transfer.v1.Transfer", + HandlerType: (*TransferServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Transfer", + Handler: _Transfer_Transfer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/transfer/v1/transfer.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go index b1f275bf0d..221b183f7c 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go @@ -1,760 +1,292 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto package events import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ForwardRequest struct { - Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` } -func (m *ForwardRequest) Reset() { *m = ForwardRequest{} } -func (*ForwardRequest) ProtoMessage() {} -func (*ForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_19f98672016720b5, []int{0} -} -func (m *ForwardRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ForwardRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardRequest.Merge(m, src) -} -func (m *ForwardRequest) XXX_Size() int { - return m.Size() -} -func (m *ForwardRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardRequest.DiscardUnknown(m) + +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo +func (*ForwardRequest) ProtoMessage() {} + +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. +func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{0} +} + +func (x *ForwardRequest) GetEnvelope() *Envelope { + if x != nil { + return x.Envelope + } + return nil +} type Envelope struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Envelope) Reset() { *m = Envelope{} } func (*Envelope) ProtoMessage() {} + +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. 
func (*Envelope) Descriptor() ([]byte, []int) { - return fileDescriptor_19f98672016720b5, []int{1} -} -func (m *Envelope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Envelope) XXX_Merge(src proto.Message) { - xxx_messageInfo_Envelope.Merge(m, src) -} -func (m *Envelope) XXX_Size() int { - return m.Size() -} -func (m *Envelope) XXX_DiscardUnknown() { - xxx_messageInfo_Envelope.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_Envelope proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.ttrpc.v1.ForwardRequest") - proto.RegisterType((*Envelope)(nil), "containerd.services.events.ttrpc.v1.Envelope") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto", fileDescriptor_19f98672016720b5) -} - -var fileDescriptor_19f98672016720b5 = []byte{ - // 396 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x52, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x8d, 0x61, 0x77, 0x69, 0x8d, 0xc4, 0xc1, 0xaa, 0x50, 0x08, 0x28, 0x59, 0x2d, 0x97, 0x15, - 0x12, 0xb6, 0x76, 0xf7, 0x06, 0x17, 0xa8, 0x28, 0x12, 0x1c, 0x23, 0x84, 0x2a, 0x90, 0x10, 0x6e, - 0x3a, 0x4d, 0x2d, 0x25, 0xb6, 0x49, 0x9c, 0xa0, 0xde, 0xfa, 0x09, 0x7c, 0x0c, 0x17, 0xfe, 0xa0, - 0x47, 0x8e, 0x9c, 0x80, 0xe6, 0x4b, 0x50, 0x9d, 0xa4, 0x81, 0xf6, 0x40, 0xa5, 0xbd, 0xbd, 0xcc, - 0x7b, 0x6f, 0xde, 0xcc, 0xc4, 0xf8, 0x75, 0x2c, 0xcc, 0xbc, 0x98, 0xd0, 0x48, 0xa5, 0x2c, 0x52, - 0xd2, 0x70, 0x21, 0x21, 0x9b, 0xfe, 0x0d, 0xb9, 0x16, 0x2c, 0x87, 0xac, 0x14, 0x11, 0xe4, 0xcc, - 0x98, 0x4c, 0x47, 0x0c, 0x4a, 0x90, 0x26, 0x67, 0xe5, 0x45, 0x83, 0xa8, 0xce, 0x94, 0x51, 0xe4, - 0x61, 0xe7, 0xa2, 0xad, 0x83, 0x36, 0x0a, 0x6b, 0xa4, 0xe5, 0x85, 0xf7, 0xec, 0xbf, 0x81, 0xb6, - 0xd9, 0xa4, 0x98, 0x31, 0x9d, 0x14, 0xb1, 0x90, 0x6c, 0x26, 0x20, 0x99, 0x6a, 0x6e, 0xe6, 0x75, - 0x8c, 0x37, 0x88, 0x55, 0xac, 0x2c, 0x64, 0x1b, 0xd4, 0x54, 0xef, 0xc5, 0x4a, 0xc5, 0x09, 0x74, - 0x6e, 0x2e, 0x17, 0x0d, 0x75, 0x7f, 0x97, 0x82, 0x54, 0x9b, 0x96, 0x0c, 0x76, 0x49, 0x23, 0x52, - 0xc8, 0x0d, 0x4f, 0x75, 0x2d, 0x38, 0x7b, 0x8f, 0xef, 0xbc, 0x54, 0xd9, 0x67, 0x9e, 0x4d, 0x43, - 0xf8, 0x54, 0x40, 0x6e, 0xc8, 0x2b, 0xdc, 0x03, 0x59, 0x42, 0xa2, 0x34, 0xb8, 0xe8, 0x14, 0x9d, - 0xdf, 0xbe, 0x7c, 0x4c, 0x0f, 0x58, 0x9d, 0x8e, 0x1a, 0x53, 0xb8, 0xb5, 0x9f, 0x7d, 0x45, 0xb8, - 0xd7, 0x96, 0xc9, 0x10, 0xf7, 0xb7, 0xe1, 0x4d, 0x63, 0x8f, 0xd6, 0xe3, 0xd1, 0x76, 0x3c, 0xfa, - 0xa6, 0x55, 0x0c, 0x7b, 0xab, 0x9f, 0x81, 0xf3, 0xe5, 0x57, 0x80, 0xc2, 0xce, 0x46, 0x1e, 0xe0, - 0xbe, 0xe4, 0x29, 0xe4, 0x9a, 0x47, 0xe0, 0xde, 0x38, 0x45, 0xe7, 0xfd, 0xb0, 0x2b, 0x90, 0x01, - 0x3e, 0x36, 0x4a, 0x8b, 0xc8, 0xbd, 0x69, 0x99, 0xfa, 0x83, 0x3c, 0xc2, 0xc7, 0x76, 0x54, 0xf7, - 0xc8, 0x66, 0x0e, 0xf6, 0x32, 0x9f, 0xcb, 0x45, 0x58, 0x4b, 0x9e, 0x1c, 0x2d, 0xbf, 0x05, 0xe8, - 0xf2, 0x23, 0x3e, 0x19, 0xd9, 0xe5, 0xc8, 0x5b, 0x7c, 0xab, 0xb9, 0x0e, 0xb9, 0x3a, 0xe8, 0x08, - 0xff, 0xde, 0xd2, 0xbb, 0xbb, 0x17, 0x36, 0xda, 0xfc, 0x9c, 0xe1, 0x87, 0xd5, 0xda, 0x77, 0x7e, - 0xac, 0x7d, 0x67, 0x59, 0xf9, 0x68, 0x55, 0xf9, 
0xe8, 0x7b, 0xe5, 0xa3, 0xdf, 0x95, 0x8f, 0xde, - 0xbd, 0xb8, 0xd6, 0x8b, 0x7d, 0x5a, 0xa3, 0xb1, 0x33, 0x46, 0x93, 0x13, 0x9b, 0x79, 0xf5, 0x27, - 0x00, 0x00, 0xff, 0xff, 0xd4, 0x90, 0xbd, 0x09, 0x04, 0x03, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *Envelope) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: timestamp - case "namespace": - return string(m.Namespace), len(m.Namespace) > 0 - case "topic": - return string(m.Topic), len(m.Topic) > 0 - case "event": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event) - if err != nil { - return "", false - } - - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false - } - return adaptor.Field(fieldpath[1:]) - } - return "", false -} -func (m *ForwardRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Envelope != nil { - { - size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Envelope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintEvents(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { - offset -= sovEvents(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ForwardRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Envelope != nil { - l = 
m.Envelope.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Envelope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvents(uint64(l)) - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEvents(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEvents(x uint64) (n int) { - return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ForwardRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ForwardRequest{`, - `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Envelope) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Envelope{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringEvents(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type EventsService interface { - Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) -} - -func RegisterEventsService(srv *github_com_containerd_ttrpc.Server, svc EventsService) { - srv.Register("containerd.services.events.ttrpc.v1.Events", map[string]github_com_containerd_ttrpc.Method{ - "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ForwardRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Forward(ctx, &req) - }, - }) -} - -type eventsClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewEventsClient(client *github_com_containerd_ttrpc.Client) EventsService { - return &eventsClient{ - client: client, - } -} - -func (c *eventsClient) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) { - var resp types.Empty - if err := c.client.Call(ctx, "containerd.services.events.ttrpc.v1.Events", "Forward", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *ForwardRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ForwardRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Envelope == nil { - m.Envelope = &Envelope{} - } - if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func (m *Envelope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Envelope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Envelope) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Envelope) GetEvent() *anypb.Any { + if x != nil { + return x.Event } return nil } -func skipEvents(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEvents - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEvents - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEvents - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76, + 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x0e, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, + 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, + 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x32, 0x60, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x56, 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76, + 0x31, 
0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData = file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes = []interface{}{ + (*ForwardRequest)(nil), // 0: containerd.services.events.ttrpc.v1.ForwardRequest + (*Envelope)(nil), // 1: containerd.services.events.ttrpc.v1.Envelope + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*anypb.Any)(nil), // 3: google.protobuf.Any + (*emptypb.Empty)(nil), // 4: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs = []int32{ + 1, // 0: containerd.services.events.ttrpc.v1.ForwardRequest.envelope:type_name -> containerd.services.events.ttrpc.v1.Envelope + 2, // 1: containerd.services.events.ttrpc.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp + 3, // 2: containerd.services.events.ttrpc.v1.Envelope.event:type_name -> google.protobuf.Any + 0, // 3: containerd.services.events.ttrpc.v1.Events.Forward:input_type -> containerd.services.events.ttrpc.v1.ForwardRequest + 4, // 4: containerd.services.events.ttrpc.v1.Events.Forward:output_type -> google.protobuf.Empty + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_init() } +func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_init() { + if 
File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto = out.File + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto index ade1c7abef..e0c2f232d3 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package containerd.services.events.ttrpc.v1; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -import weak "gogoproto/gogo.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -41,7 +40,7 @@ message ForwardRequest { message Envelope { option (containerd.plugin.fieldpath) = true; - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go new file mode 100644 index 0000000000..ad48127be9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +package events + +import ( + v2 "github.com/containerd/typeurl/v2" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ForwardRequest) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "envelope": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.Envelope == nil { + return "", false + } + return m.Envelope.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *Envelope) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: timestamp + case "namespace": + return string(m.Namespace), len(m.Namespace) > 0 + case "topic": + return string(m.Topic), len(m.Topic) > 0 + case "event": + decoded, err := v2.UnmarshalAny(m.Event) + if err != nil { + return "", false + } + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go new file mode 100644 index 0000000000..8828c6cbcc --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +package events + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +type EventsService interface { + Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) +} + +func RegisterEventsService(srv *ttrpc.Server, svc EventsService) { + srv.RegisterService("containerd.services.events.ttrpc.v1.Events", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ForwardRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Forward(ctx, &req) + }, + }, + }) +} + +type eventsClient struct { + client *ttrpc.Client +} + +func NewEventsClient(client *ttrpc.Client) EventsService { + return &eventsClient{ + client: client, + } +} + +func (c *eventsClient) Forward(ctx context.Context, req *ForwardRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.services.events.ttrpc.v1.Events", "Forward", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go index b742c6ae62..c087d3e26b 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go @@ -1,476 +1,187 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/version/v1/version.proto package version import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type VersionResponse struct { - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (x *VersionResponse) Reset() { + *x = VersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VersionResponse) Reset() { *m = VersionResponse{} } func (*VersionResponse) ProtoMessage() {} + +func (x *VersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. func (*VersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_128109001e578ffe, []int{0} + return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP(), []int{0} } -func (m *VersionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *VersionResponse) GetVersion() string { + if x != nil { + return x.Version } -} -func (m *VersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionResponse.Merge(m, src) -} -func (m *VersionResponse) XXX_Size() int { - return m.Size() -} -func (m *VersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VersionResponse.DiscardUnknown(m) + return "" } -var xxx_messageInfo_VersionResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe) -} - -var fileDescriptor_128109001e578ffe = []byte{ - // 243 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9, - 0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 
0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2, - 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05, - 0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11, - 0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, - 0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92, - 0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e, - 0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9, - 0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50, - 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, - 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30, - 0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9, - 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// VersionClient is the client API for Version service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type VersionClient interface { - Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) -} - -type versionClient struct { - cc *grpc.ClientConn -} - -func NewVersionClient(cc *grpc.ClientConn) VersionClient { - return &versionClient{cc} -} - -func (c *versionClient) Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { - out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...) - if err != nil { - return nil, err +func (x *VersionResponse) GetRevision() string { + if x != nil { + return x.Revision } - return out, nil + return "" } -// VersionServer is the server API for Version service. -type VersionServer interface { - Version(context.Context, *types.Empty) (*VersionResponse, error) -} +var File_github_com_containerd_containerd_api_services_version_v1_version_proto protoreflect.FileDescriptor -// UnimplementedVersionServer can be embedded to have forward compatible implementations. 
-type UnimplementedVersionServer struct { -} - -func (*UnimplementedVersionServer) Version(ctx context.Context, req *types.Empty) (*VersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} - -func RegisterVersionServer(s *grpc.Server, srv VersionServer) { - s.RegisterService(&_Version_serviceDesc, srv) -} - -func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(types.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VersionServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.version.v1.Version/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VersionServer).Version(ctx, req.(*types.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Version_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.version.v1.Version", - HandlerType: (*VersionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _Version_Version_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", -} - -func (m *VersionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VersionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Revision) > 0 { - i -= len(m.Revision) - copy(dAtA[i:], m.Revision) - i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision))) - i-- - dAtA[i] = 0x12 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintVersion(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintVersion(dAtA []byte, offset int, v uint64) int { - offset -= sovVersion(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *VersionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Version) - if l > 0 { - n += 1 + l + sovVersion(uint64(l)) - } - l = len(m.Revision) - if l > 0 { - n += 1 + l + sovVersion(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovVersion(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozVersion(x uint64) (n int) { - return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *VersionResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VersionResponse{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringVersion(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := 
reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *VersionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthVersion - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthVersion - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthVersion - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthVersion - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Revision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipVersion(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthVersion - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipVersion(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthVersion - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupVersion - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthVersion - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x5d, + 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, + 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 
0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupVersion = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = []interface{}{ + (*VersionResponse)(nil), // 0: containerd.services.version.v1.VersionResponse + (*emptypb.Empty)(nil), // 1: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = []int32{ + 1, // 0: containerd.services.version.v1.Version.Version:input_type -> google.protobuf.Empty + 0, // 1: containerd.services.version.v1.Version.Version:output_type -> containerd.services.version.v1.VersionResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() } +func file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() { + if File_github_com_containerd_containerd_api_services_version_v1_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes, 
+ }.Build() + File_github_com_containerd_containerd_api_services_version_v1_version_proto = out.File + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto index 97681bb86e..bd948ff343 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto @@ -19,7 +19,6 @@ syntax = "proto3"; package containerd.services.version.v1; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; // TODO(stevvooe): Should version service actually be versioned? option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go new file mode 100644 index 0000000000..4070fd8347 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/version/v1/version.proto + +package version + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// VersionClient is the client API for Version service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type VersionClient interface { + Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) +} + +type versionClient struct { + cc grpc.ClientConnInterface +} + +func NewVersionClient(cc grpc.ClientConnInterface) VersionClient { + return &versionClient{cc} +} + +func (c *versionClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// VersionServer is the server API for Version service. +// All implementations must embed UnimplementedVersionServer +// for forward compatibility +type VersionServer interface { + Version(context.Context, *emptypb.Empty) (*VersionResponse, error) + mustEmbedUnimplementedVersionServer() +} + +// UnimplementedVersionServer must be embedded to have forward compatible implementations. 
+type UnimplementedVersionServer struct { +} + +func (UnimplementedVersionServer) Version(context.Context, *emptypb.Empty) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedVersionServer) mustEmbedUnimplementedVersionServer() {} + +// UnsafeVersionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to VersionServer will +// result in compilation errors. +type UnsafeVersionServer interface { + mustEmbedUnimplementedVersionServer() +} + +func RegisterVersionServer(s grpc.ServiceRegistrar, srv VersionServer) { + s.RegisterService(&Version_ServiceDesc, srv) +} + +func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.version.v1.Version/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionServer).Version(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Version_ServiceDesc is the grpc.ServiceDesc for Version service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Version_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.version.v1.Version", + HandlerType: (*VersionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _Version_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go index fe71dbf433..f3db1c52d9 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -1,30 +1,39 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/descriptor.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Descriptor describes a blob in a content store. // @@ -32,567 +41,166 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // oci descriptor found in a manifest. // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor type Descriptor struct { - MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest string `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Descriptor) Reset() { + *x = Descriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Descriptor) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Descriptor) Reset() { *m = Descriptor{} } func (*Descriptor) ProtoMessage() {} + +func (x *Descriptor) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() 
== nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Descriptor.ProtoReflect.Descriptor instead. func (*Descriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_37f958df3707db9e, []int{0} -} -func (m *Descriptor) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Descriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_Descriptor.Merge(m, src) -} -func (m *Descriptor) XXX_Size() int { - return m.Size() -} -func (m *Descriptor) XXX_DiscardUnknown() { - xxx_messageInfo_Descriptor.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Descriptor proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor") - proto.RegisterMapType((map[string]string)(nil), "containerd.types.Descriptor.AnnotationsEntry") +func (x *Descriptor) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptor_37f958df3707db9e) +func (x *Descriptor) GetDigest() string { + if x != nil { + return x.Digest + } + return "" } -var fileDescriptor_37f958df3707db9e = []byte{ - // 311 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, - 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, - 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, - 0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29, - 0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90, - 0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90, - 0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20, - 0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82, - 0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c, - 0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, - 0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62, - 0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04, - 0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70, - 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, - 0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, - 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1, - 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22, - 0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 
0x00, +func (x *Descriptor) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 } -func (m *Descriptor) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintDescriptor(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDescriptor(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDescriptor(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if m.Size_ != 0 { - i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x18 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0x12 - } - if len(m.MediaType) > 0 { - i -= len(m.MediaType) - copy(dAtA[i:], m.MediaType) - i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { - offset -= sovDescriptor(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Descriptor) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.MediaType) - if l > 0 { - n += 1 + l + sovDescriptor(uint64(l)) - } - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovDescriptor(uint64(l)) - } - if m.Size_ != 0 { - n += 1 + sovDescriptor(uint64(m.Size_)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v))) - n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovDescriptor(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDescriptor(x uint64) (n int) { - return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Descriptor) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&Descriptor{`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringDescriptor(v interface{}) string { - 
rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Descriptor) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDescriptor - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDescriptor - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthDescriptor - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthDescriptor - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipDescriptor(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDescriptor - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDescriptor(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDescriptor - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Descriptor) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations } return nil } -func skipDescriptor(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDescriptor - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDescriptor - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDescriptor - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_types_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDescriptor = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = []interface{}{ + (*Descriptor)(nil), // 0: containerd.types.Descriptor + nil, // 1: containerd.types.Descriptor.AnnotationsEntry +} +var file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Descriptor.annotations:type_name -> containerd.types.Descriptor.AnnotationsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_descriptor_proto_init() } +func file_github_com_containerd_containerd_api_types_descriptor_proto_init() { + if File_github_com_containerd_containerd_api_types_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Descriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_descriptor_proto = out.File + 
file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto index a841d1bb22..faaf416dd1 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.proto +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto @@ -18,8 +18,6 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Descriptor describes a blob in a content store. @@ -29,7 +27,7 @@ option go_package = "github.com/containerd/containerd/api/types;types"; // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor message Descriptor { string media_type = 1; - string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 2; int64 size = 3; map annotations = 5; } diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go index 75773e442a..b18ce1c5b6 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -1,450 +1,194 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/metrics.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Metric struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Data *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Data *anypb.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_8d594d87edf6e6bc, []int{0} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Metric)(nil), "containerd.types.Metric") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc) -} - -var fileDescriptor_8d594d87edf6e6bc = []byte{ - // 258 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, - 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, - 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, - 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, - 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 
0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50, - 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, - 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70, - 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, - 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, - 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, - 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, - 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, - 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, - 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, - 0x00, 0x00, -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintMetrics(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovMetrics(uint64(l)) - l = len(m.ID) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metric) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Metric{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := 
reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = &types.Any{} - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Metric) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } + +func (x *Metric) GetID() string { + if x != nil { + return x.ID } - return 0, io.ErrUnexpectedEOF + return "" +} + +func (x *Metric) GetData() *anypb.Any { + if x != nil { + return x.Data + } + return nil +} + +var File_github_com_containerd_containerd_api_types_metrics_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = []interface{}{ + (*Metric)(nil), // 0: containerd.types.Metric + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Metric.timestamp:type_name -> google.protobuf.Timestamp + 2, // 1: containerd.types.Metric.data:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_metrics_proto_init() } +func file_github_com_containerd_containerd_api_types_metrics_proto_init() { + if File_github_com_containerd_containerd_api_types_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_metrics_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_metrics_proto = out.File + file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = nil + 
file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto index b8bc673267..3e6a7751e3 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.proto +++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto @@ -18,14 +18,13 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Metric { - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string id = 2; google.protobuf.Any data = 3; } diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go index d0a0bee761..ff77a7d7bd 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -1,28 +1,39 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/mount.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Mount describes mounts for a container. // @@ -32,6 +43,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. type Mount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Type defines the nature of the mount. 
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Source specifies the name of the mount. Depending on mount type, this @@ -40,455 +55,148 @@ type Mount struct { // Target path in container Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // Options specifies zero or more fstab style mount options. - Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` +} + +func (x *Mount) Reset() { + *x = Mount{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Mount) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Mount) Reset() { *m = Mount{} } func (*Mount) ProtoMessage() {} + +func (x *Mount) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Mount.ProtoReflect.Descriptor instead. func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_920196890d4a7b9f, []int{0} -} -func (m *Mount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Mount.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Mount) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mount.Merge(m, src) -} -func (m *Mount) XXX_Size() int { - return m.Size() -} -func (m *Mount) XXX_DiscardUnknown() { - xxx_messageInfo_Mount.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Mount proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Mount)(nil), "containerd.types.Mount") +func (x *Mount) GetType() string { + if x != nil { + return x.Type + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptor_920196890d4a7b9f) +func (x *Mount) GetSource() string { + if x != nil { + return x.Source + } + return "" } -var fileDescriptor_920196890d4a7b9f = []byte{ - // 202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, - 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, - 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, - 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, - 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 
0x4f, 0x48, - 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, - 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, - 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, - 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, +func (x *Mount) GetTarget() string { + if x != nil { + return x.Target + } + return "" } -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Options) > 0 { - for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Options[iNdEx]) - copy(dAtA[i:], m.Options[iNdEx]) - i = encodeVarintMount(dAtA, i, uint64(len(m.Options[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Target) > 0 { - i -= len(m.Target) - copy(dAtA[i:], m.Target) - i = encodeVarintMount(dAtA, i, uint64(len(m.Target))) - i-- - dAtA[i] = 0x1a - } - if len(m.Source) > 0 { - i -= len(m.Source) - copy(dAtA[i:], m.Source) - i = encodeVarintMount(dAtA, i, uint64(len(m.Source))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintMount(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintMount(dAtA []byte, offset int, v uint64) int { - offset -= sovMount(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Mount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - l = len(m.Source) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - l = len(m.Target) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - if len(m.Options) > 0 { - for _, s := range m.Options { - l = len(s) - n += 1 + l + sovMount(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMount(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMount(x uint64) (n int) { - return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Mount) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Mount{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `Target:` + fmt.Sprintf("%v", this.Target) + `,`, - `Options:` + fmt.Sprintf("%v", this.Options) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMount(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Mount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Target = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMount(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMount - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Mount) GetOptions() []string { + if x != nil { + return x.Options } return nil } -func skipMount(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMount - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMount - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMount - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_types_mount_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x65, 0x0a, 0x05, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMount = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMount = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = 
file_github_com_containerd_containerd_api_types_mount_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_mount_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_mount_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_mount_proto_goTypes = []interface{}{ + (*Mount)(nil), // 0: containerd.types.Mount +} +var file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_mount_proto_init() } +func file_github_com_containerd_containerd_api_types_mount_proto_init() { + if File_github_com_containerd_containerd_api_types_mount_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_mount_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_mount_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_mount_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_mount_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_mount_proto = out.File + file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_mount_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto index 41ab133138..54e0a0cddf 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.proto +++ b/vendor/github.com/containerd/containerd/api/types/mount.proto @@ -18,8 +18,6 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go index a0f78c8a76..3e206cbafb 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -1,435 +1,184 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. 
+// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/platform.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Platform follows the structure of the OCI platform specification, from // descriptors. type Platform struct { - OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` - Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` +} + +func (x *Platform) Reset() { + *x = Platform{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Platform) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Platform) Reset() { *m = Platform{} } func (*Platform) ProtoMessage() {} + +func (x *Platform) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Platform.ProtoReflect.Descriptor instead. 
func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_24ba7a4b83e2367e, []int{0} -} -func (m *Platform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Platform.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Platform) XXX_Merge(src proto.Message) { - xxx_messageInfo_Platform.Merge(m, src) -} -func (m *Platform) XXX_Size() int { - return m.Size() -} -func (m *Platform) XXX_DiscardUnknown() { - xxx_messageInfo_Platform.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Platform proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Platform)(nil), "containerd.types.Platform") +func (x *Platform) GetOS() string { + if x != nil { + return x.OS + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e) +func (x *Platform) GetArchitecture() string { + if x != nil { + return x.Architecture + } + return "" } -var fileDescriptor_24ba7a4b83e2367e = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, - 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, - 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, - 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, - 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, - 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, - 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, - 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, +func (x *Platform) GetVariant() string { + if x != nil { + return x.Variant + } + return "" } -func (m *Platform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +var File_github_com_containerd_containerd_api_types_platform_proto protoreflect.FileDescriptor -func (m *Platform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Platform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Variant) > 0 { - i -= len(m.Variant) - copy(dAtA[i:], m.Variant) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant))) - i-- - dAtA[i] = 0x1a - } - if 
len(m.Architecture) > 0 { - i -= len(m.Architecture) - copy(dAtA[i:], m.Architecture) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture))) - i-- - dAtA[i] = 0x12 - } - if len(m.OS) > 0 { - i -= len(m.OS) - copy(dAtA[i:], m.OS) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { - offset -= sovPlatform(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Platform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.OS) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) - } - l = len(m.Architecture) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) - } - l = len(m.Variant) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovPlatform(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlatform(x uint64) (n int) { - return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Platform) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Platform{`, - `OS:` + fmt.Sprintf("%v", this.OS) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringPlatform(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Platform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Platform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OS = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Architecture = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Variant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlatform(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlatform - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlatform(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlatform - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlatform - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlatform - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x58, 0x0a, + 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlatform = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = file_github_com_containerd_containerd_api_types_platform_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_platform_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_platform_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_platform_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_platform_proto_goTypes = []interface{}{ + (*Platform)(nil), // 0: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_platform_proto_init() } +func file_github_com_containerd_containerd_api_types_platform_proto_init() { + if File_github_com_containerd_containerd_api_types_platform_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Platform); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_platform_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_platform_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_platform_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_platform_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_platform_proto = out.File + file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = nil + 
file_github_com_containerd_containerd_api_types_platform_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto index 7813606841..b6088251f0 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.proto +++ b/vendor/github.com/containerd/containerd/api/types/platform.proto @@ -18,14 +18,12 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Platform follows the structure of the OCI platform specification, from // descriptors. message Platform { - string os = 1 [(gogoproto.customname) = "OS"]; + string os = 1; string architecture = 2; string variant = 3; } diff --git a/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go new file mode 100644 index 0000000000..67594f416c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go @@ -0,0 +1,346 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/sandbox.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Sandbox represents a sandbox metadata object that keeps all info required by controller to +// work with a particular instance. +type Sandbox struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SandboxID is a unique instance identifier within namespace + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // Runtime specifies which runtime to use for executing this container. + Runtime *Sandbox_Runtime `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty"` + // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the + // bundle directory (similary to OCI spec). + Spec *anypb.Any `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + // Labels provides an area to include arbitrary data on containers. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CreatedAt is the time the container was first created. + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // UpdatedAt is the last time the container was mutated. + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + // Extensions allow clients to provide optional blobs that can be handled by runtime. + Extensions map[string]*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Sandbox) Reset() { + *x = Sandbox{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sandbox) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sandbox) ProtoMessage() {} + +func (x *Sandbox) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sandbox.ProtoReflect.Descriptor instead. +func (*Sandbox) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *Sandbox) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *Sandbox) GetRuntime() *Sandbox_Runtime { + if x != nil { + return x.Runtime + } + return nil +} + +func (x *Sandbox) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Sandbox) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Sandbox) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Sandbox) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Sandbox) GetExtensions() map[string]*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +type Sandbox_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name is the name of the runtime. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). + // Typically this data expected to be runtime shim implementation specific. 
+ Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *Sandbox_Runtime) Reset() { + *x = Sandbox_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sandbox_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sandbox_Runtime) ProtoMessage() {} + +func (x *Sandbox_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sandbox_Runtime.ProtoReflect.Descriptor instead. +func (*Sandbox_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Sandbox_Runtime) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Sandbox_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +var File_github_com_containerd_containerd_api_types_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x04, 0x0a, 0x07, 0x53, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x49, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = []interface{}{ + (*Sandbox)(nil), // 0: containerd.types.Sandbox + (*Sandbox_Runtime)(nil), // 1: containerd.types.Sandbox.Runtime + nil, // 2: containerd.types.Sandbox.LabelsEntry + nil, // 3: containerd.types.Sandbox.ExtensionsEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Sandbox.runtime:type_name -> containerd.types.Sandbox.Runtime + 4, // 1: containerd.types.Sandbox.spec:type_name -> google.protobuf.Any + 2, // 2: containerd.types.Sandbox.labels:type_name -> containerd.types.Sandbox.LabelsEntry + 5, // 3: containerd.types.Sandbox.created_at:type_name -> google.protobuf.Timestamp + 5, // 4: containerd.types.Sandbox.updated_at:type_name -> google.protobuf.Timestamp + 3, // 5: containerd.types.Sandbox.extensions:type_name -> containerd.types.Sandbox.ExtensionsEntry + 4, // 6: containerd.types.Sandbox.Runtime.options:type_name -> google.protobuf.Any + 4, // 7: containerd.types.Sandbox.ExtensionsEntry.value:type_name -> google.protobuf.Any + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_types_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_types_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sandbox); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sandbox_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_sandbox_proto = out.File + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/containerd/containerd/api/types/sandbox.proto b/vendor/github.com/containerd/containerd/api/types/sandbox.proto
new file mode 100644
index 0000000000..b607706194
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/sandbox.proto
@@ -0,0 +1,51 @@
+/*
+	Copyright The containerd Authors.
+
+	Licensed under the Apache License, Version 2.0 (the "License");
+	you may not use this file except in compliance with the License.
+	You may obtain a copy of the License at
+
+		http://www.apache.org/licenses/LICENSE-2.0
+
+	Unless required by applicable law or agreed to in writing, software
+	distributed under the License is distributed on an "AS IS" BASIS,
+	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+	See the License for the specific language governing permissions and
+	limitations under the License.
+*/
+
+syntax = "proto3";
+
+package containerd.types;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+// Sandbox represents a sandbox metadata object that keeps all info required by controller to
+// work with a particular instance.
+message Sandbox {
+	// SandboxID is a unique instance identifier within namespace
+	string sandbox_id = 1;
+	message Runtime {
+		// Name is the name of the runtime.
+		string name = 1;
+		// Options specify additional runtime initialization options for the shim (this data will be available in StartShim).
+		// Typically this data expected to be runtime shim implementation specific.
+		google.protobuf.Any options = 2;
+	}
+	// Runtime specifies which runtime to use for executing this container.
+	Runtime runtime = 2;
+	// Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the
+	// bundle directory (similary to OCI spec).
+	google.protobuf.Any spec = 3;
+	// Labels provides an area to include arbitrary data on containers.
+	map<string, string> labels = 4;
+	// CreatedAt is the time the container was first created.
+	google.protobuf.Timestamp created_at = 5;
+	// UpdatedAt is the last time the container was mutated.
+	google.protobuf.Timestamp updated_at = 6;
+	// Extensions allow clients to provide optional blobs that can be handled by runtime.
+	map<string, google.protobuf.Any> extensions = 7;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
index f511bbd058..5c58d1ef18 100644
--- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
@@ -1,980 +1,406 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/task/task.proto package task import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Status int32 const ( - StatusUnknown Status = 0 - StatusCreated Status = 1 - StatusRunning Status = 2 - StatusStopped Status = 3 - StatusPaused Status = 4 - StatusPausing Status = 5 + Status_UNKNOWN Status = 0 + Status_CREATED Status = 1 + Status_RUNNING Status = 2 + Status_STOPPED Status = 3 + Status_PAUSED Status = 4 + Status_PAUSING Status = 5 ) -var Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATED", - 2: "RUNNING", - 3: "STOPPED", - 4: "PAUSED", - 5: "PAUSING", -} +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATED", + 2: "RUNNING", + 3: "STOPPED", + 4: "PAUSED", + 5: "PAUSING", + } + Status_value = map[string]int32{ + "UNKNOWN": 0, + "CREATED": 1, + "RUNNING": 2, + "STOPPED": 3, + "PAUSED": 4, + "PAUSING": 5, + } +) -var Status_value = map[string]int32{ - "UNKNOWN": 0, - "CREATED": 1, - "RUNNING": 2, - "STOPPED": 3, - "PAUSED": 4, - "PAUSING": 5, +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p } func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. 
func (Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{0} + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} } type Process struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -func (m *Process) Reset() { *m = Process{} } -func (*Process) ProtoMessage() {} -func (*Process) Descriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{0} -} -func (m *Process) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Process.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Process) Reset() { + *x = Process{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Process) XXX_Merge(src proto.Message) { - xxx_messageInfo_Process.Merge(m, src) -} -func (m *Process) XXX_Size() int { - return m.Size() -} -func (m *Process) XXX_DiscardUnknown() { - xxx_messageInfo_Process.DiscardUnknown(m) + +func (x *Process) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Process proto.InternalMessageInfo +func (*Process) ProtoMessage() {} + 
+func (x *Process) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Process.ProtoReflect.Descriptor instead. +func (*Process) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} +} + +func (x *Process) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *Process) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Process) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *Process) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_UNKNOWN +} + +func (x *Process) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *Process) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *Process) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *Process) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *Process) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *Process) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} type ProcessInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // PID is the process ID. Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` // Info contains additional process information. // // Info varies by platform. - Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Info *anypb.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` +} + +func (x *ProcessInfo) Reset() { + *x = ProcessInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProcessInfo) Reset() { *m = ProcessInfo{} } func (*ProcessInfo) ProtoMessage() {} + +func (x *ProcessInfo) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. 
func (*ProcessInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{1} -} -func (m *ProcessInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProcessInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessInfo.Merge(m, src) -} -func (m *ProcessInfo) XXX_Size() int { - return m.Size() -} -func (m *ProcessInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessInfo.DiscardUnknown(m) + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value) - proto.RegisterType((*Process)(nil), "containerd.v1.types.Process") - proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo") +func (x *ProcessInfo) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16) -} - -var fileDescriptor_391ef18c8ab0dc16 = []byte{ - // 545 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40, - 0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64, - 0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13, - 0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c, - 0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39, - 0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66, - 0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26, - 0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79, - 0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31, - 0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9, - 0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50, - 0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28, - 0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23, - 0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0, - 0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda, - 0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a, - 0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11, - 0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55, - 0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab, - 0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a, - 0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50, - 0x67, 0x13, 0xa8, 0x33, 0xda, 
0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82, - 0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb, - 0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19, - 0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf, - 0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce, - 0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f, - 0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47, - 0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94, - 0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc, - 0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f, - 0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8, - 0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9, - 0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00, - 0x00, -} - -func (m *Process) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Process) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Process) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintTask(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x52 - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x48 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Status != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProcessInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProcessInfo) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProcessInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTask(dAtA []byte, offset int, v uint64) int { - offset -= sovTask(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Process) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.Status != 0 { - n += 1 + sovTask(uint64(m.Status)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProcessInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTask(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTask(x uint64) (n int) { - return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Process) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Process{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessInfo{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTask(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - 
return fmt.Sprintf("*%v", pv) -} -func (m *Process) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Process: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } 
- var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ProcessInfo) GetInfo() *anypb.Any { + if x != nil { + return x.Info } return nil } -func (m *ProcessInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &types.Any{} - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTask(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTask - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTask - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTask - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_github_com_containerd_containerd_api_types_task_task_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, + 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, + 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x33, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, + 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 
0x74, 0x64, 0x6f, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, + 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x12, 0x28, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x2a, 0x55, 0x0a, 0x06, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x53, + 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x55, 0x53, + 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x41, 0x55, 0x53, 0x49, 0x4e, 0x47, 0x10, + 0x05, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = []interface{}{ + (Status)(0), // 0: 
containerd.v1.types.Status + (*Process)(nil), // 1: containerd.v1.types.Process + (*ProcessInfo)(nil), // 2: containerd.v1.types.ProcessInfo + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = []int32{ + 0, // 0: containerd.v1.types.Process.status:type_name -> containerd.v1.types.Status + 3, // 1: containerd.v1.types.Process.exited_at:type_name -> google.protobuf.Timestamp + 4, // 2: containerd.v1.types.ProcessInfo.info:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_task_task_proto_init() } +func file_github_com_containerd_containerd_api_types_task_task_proto_init() { + if File_github_com_containerd_containerd_api_types_task_task_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Process); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_task_task_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_task_task_proto = out.File + file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto index df08dfd99d..afc8e94bb4 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.proto +++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto @@ -18,20 +18,18 @@ syntax = "proto3"; package containerd.v1.types; -import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/any.proto"; -enum Status { - option (gogoproto.goproto_enum_prefix) = false; - option (gogoproto.enum_customname) = "Status"; +option go_package = "github.com/containerd/containerd/api/types/task"; - UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"]; - CREATED = 1 [(gogoproto.enumvalue_customname) = 
"StatusCreated"]; - RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"]; - STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"]; - PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"]; - PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"]; +enum Status { + UNKNOWN = 0; + CREATED = 1; + RUNNING = 2; + STOPPED = 3; + PAUSED = 4; + PAUSING = 5; } message Process { @@ -44,7 +42,7 @@ message Process { string stderr = 7; bool terminal = 8; uint32 exit_status = 9; - google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/doc.go b/vendor/github.com/containerd/containerd/api/types/transfer/doc.go new file mode 100644 index 0000000000..82f94c2f15 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package transfer defines the transfer types. +package transfer diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go new file mode 100644 index 0000000000..b3c9830411 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go @@ -0,0 +1,451 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/imagestore.proto + +package transfer + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ImageStore struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Platforms []*types.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` + AllMetadata bool `protobuf:"varint,4,opt,name=all_metadata,json=allMetadata,proto3" json:"all_metadata,omitempty"` + ManifestLimit uint32 `protobuf:"varint,5,opt,name=manifest_limit,json=manifestLimit,proto3" json:"manifest_limit,omitempty"` + // extra_references are used to set image names on imports of sub-images from the index + ExtraReferences []*ImageReference `protobuf:"bytes,6,rep,name=extra_references,json=extraReferences,proto3" json:"extra_references,omitempty"` + Unpacks []*UnpackConfiguration `protobuf:"bytes,10,rep,name=unpacks,proto3" json:"unpacks,omitempty"` +} + +func (x *ImageStore) Reset() { + *x = ImageStore{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageStore) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageStore) ProtoMessage() {} + +func (x *ImageStore) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageStore.ProtoReflect.Descriptor instead. 
+func (*ImageStore) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{0} +} + +func (x *ImageStore) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageStore) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ImageStore) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms + } + return nil +} + +func (x *ImageStore) GetAllMetadata() bool { + if x != nil { + return x.AllMetadata + } + return false +} + +func (x *ImageStore) GetManifestLimit() uint32 { + if x != nil { + return x.ManifestLimit + } + return 0 +} + +func (x *ImageStore) GetExtraReferences() []*ImageReference { + if x != nil { + return x.ExtraReferences + } + return nil +} + +func (x *ImageStore) GetUnpacks() []*UnpackConfiguration { + if x != nil { + return x.Unpacks + } + return nil +} + +type UnpackConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // platform is the platform to unpack for, used for resolving manifest and snapshotter + // if not provided + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` + // snapshotter to unpack to, if not provided default for platform shoudl be used + Snapshotter string `protobuf:"bytes,2,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` +} + +func (x *UnpackConfiguration) Reset() { + *x = UnpackConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnpackConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnpackConfiguration) ProtoMessage() {} + +func (x *UnpackConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnpackConfiguration.ProtoReflect.Descriptor instead. +func (*UnpackConfiguration) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{1} +} + +func (x *UnpackConfiguration) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *UnpackConfiguration) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +// ImageReference is used to create or find a reference for an image +type ImageReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // is_prefix determines whether the Name should be considered + // a prefix (without tag or digest). + // For lookup, this may allow matching multiple tags. + // For store, this must have a tag or digest added. + IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix,proto3" json:"is_prefix,omitempty"` + // allow_overwrite allows overwriting or ignoring the name if + // another reference is provided (such as through an annotation). + // Only used if IsPrefix is true. 
+ AllowOverwrite bool `protobuf:"varint,3,opt,name=allow_overwrite,json=allowOverwrite,proto3" json:"allow_overwrite,omitempty"` + // add_digest adds the manifest digest to the reference. + // For lookup, this allows matching tags with any digest. + // For store, this allows adding the digest to the name. + // Only used if IsPrefix is true. + AddDigest bool `protobuf:"varint,4,opt,name=add_digest,json=addDigest,proto3" json:"add_digest,omitempty"` + // skip_named_digest only considers digest references which do not + // have a non-digested named reference. + // For lookup, this will deduplicate digest references when there is a named match. + // For store, this only adds this digest reference when there is no matching full + // name reference from the prefix. + // Only used if IsPrefix is true. + SkipNamedDigest bool `protobuf:"varint,5,opt,name=skip_named_digest,json=skipNamedDigest,proto3" json:"skip_named_digest,omitempty"` +} + +func (x *ImageReference) Reset() { + *x = ImageReference{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageReference) ProtoMessage() {} + +func (x *ImageReference) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageReference.ProtoReflect.Descriptor instead. 
+func (*ImageReference) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{2} +} + +func (x *ImageReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageReference) GetIsPrefix() bool { + if x != nil { + return x.IsPrefix + } + return false +} + +func (x *ImageReference) GetAllowOverwrite() bool { + if x != nil { + return x.AllowOverwrite + } + return false +} + +func (x *ImageReference) GetAddDigest() bool { + if x != nil { + return x.AddDigest + } + return false +} + +func (x *ImageReference) GetSkipNamedDigest() bool { + if x != nil { + return x.SkipNamedDigest + } + return false +} + +var File_github_com_containerd_containerd_api_types_transfer_imagestore_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x03, 0x0a, + 0x0a, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x49, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, + 0x65, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x54, + 0x0a, 0x10, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 
0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x65, 0x72, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x2e, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6f, 0x0a, 0x13, 0x55, 0x6e, 0x70, + 0x61, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x0e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x64, 0x5f, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x64, 0x64, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce sync.Once + 
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = []interface{}{ + (*ImageStore)(nil), // 0: containerd.types.transfer.ImageStore + (*UnpackConfiguration)(nil), // 1: containerd.types.transfer.UnpackConfiguration + (*ImageReference)(nil), // 2: containerd.types.transfer.ImageReference + nil, // 3: containerd.types.transfer.ImageStore.LabelsEntry + (*types.Platform)(nil), // 4: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = []int32{ + 3, // 0: containerd.types.transfer.ImageStore.labels:type_name -> containerd.types.transfer.ImageStore.LabelsEntry + 4, // 1: containerd.types.transfer.ImageStore.platforms:type_name -> containerd.types.Platform + 2, // 2: containerd.types.transfer.ImageStore.extra_references:type_name -> containerd.types.transfer.ImageReference + 1, // 3: containerd.types.transfer.ImageStore.unpacks:type_name -> containerd.types.transfer.UnpackConfiguration + 4, // 4: containerd.types.transfer.UnpackConfiguration.platform:type_name -> containerd.types.Platform + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_imagestore_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageStore); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnpackConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes,
+ DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs,
+ MessageInfos: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes,
+ }.Build()
+ File_github_com_containerd_containerd_api_types_transfer_imagestore_proto = out.File
+ file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = nil
+ file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = nil
+ file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto
new file mode 100644
index 0000000000..57ac2ebde5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto
@@ -0,0 +1,82 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+syntax = "proto3";
+
+package containerd.types.transfer;
+
+import "github.com/containerd/containerd/api/types/platform.proto";
+
+option go_package = "github.com/containerd/containerd/api/types/transfer";
+
+message ImageStore {
+ string name = 1;
+ map<string, string> labels = 2;
+
+ // Content filters
+
+ repeated types.Platform platforms = 3;
+ bool all_metadata = 4;
+ uint32 manifest_limit = 5;
+
+ // Import naming
+
+ // extra_references are used to set image names on imports of sub-images from the index
+ repeated ImageReference extra_references = 6;
+
+ // Unpack Configuration, multiple allowed
+
+ repeated UnpackConfiguration unpacks = 10;
+}
+
+message UnpackConfiguration {
+ // platform is the platform to unpack for, used for resolving manifest and snapshotter
+ // if not provided
+ types.Platform platform = 1;
+
+ // snapshotter to unpack to, if not provided default for platform shoudl be used
+ string snapshotter = 2;
+}
+
+// ImageReference is used to create or find a reference for an image
+message ImageReference {
+ string name = 1;
+
+ // is_prefix determines whether the Name should be considered
+ // a prefix (without tag or digest).
+ // For lookup, this may allow matching multiple tags.
+ // For store, this must have a tag or digest added.
+ bool is_prefix = 2;
+
+ // allow_overwrite allows overwriting or ignoring the name if
+ // another reference is provided (such as through an annotation).
+ // Only used if IsPrefix is true.
+ bool allow_overwrite = 3;
+
+ // add_digest adds the manifest digest to the reference.
+ // For lookup, this allows matching tags with any digest.
+ // For store, this allows adding the digest to the name.
+ // Only used if IsPrefix is true.
+ bool add_digest = 4; + + // skip_named_digest only considers digest references which do not + // have a non-digested named reference. + // For lookup, this will deduplicate digest references when there is a named match. + // For store, this only adds this digest reference when there is no matching full + // name reference from the prefix. + // Only used if IsPrefix is true. + bool skip_named_digest = 5; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go new file mode 100644 index 0000000000..a2a48ac152 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go @@ -0,0 +1,320 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/importexport.proto + +package transfer + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ImageImportStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stream is used to identify the binary input stream for the import operation. + // The stream uses the transfer binary stream protocol with the client as the sender. + // The binary data is expected to be a raw tar stream. 
+ Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + ForceCompress bool `protobuf:"varint,3,opt,name=force_compress,json=forceCompress,proto3" json:"force_compress,omitempty"` +} + +func (x *ImageImportStream) Reset() { + *x = ImageImportStream{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageImportStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageImportStream) ProtoMessage() {} + +func (x *ImageImportStream) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageImportStream.ProtoReflect.Descriptor instead. +func (*ImageImportStream) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{0} +} + +func (x *ImageImportStream) GetStream() string { + if x != nil { + return x.Stream + } + return "" +} + +func (x *ImageImportStream) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ImageImportStream) GetForceCompress() bool { + if x != nil { + return x.ForceCompress + } + return false +} + +type ImageExportStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stream is used to identify the binary output stream for the export operation. + // The stream uses the transfer binary stream protocol with the server as the sender. + // The binary data is expected to be a raw tar stream. + Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // The specified platforms + Platforms []*types.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` + // Whether to include all platforms + AllPlatforms bool `protobuf:"varint,4,opt,name=all_platforms,json=allPlatforms,proto3" json:"all_platforms,omitempty"` + // Skips the creation of the Docker compatible manifest.json file + SkipCompatibilityManifest bool `protobuf:"varint,5,opt,name=skip_compatibility_manifest,json=skipCompatibilityManifest,proto3" json:"skip_compatibility_manifest,omitempty"` + // Excludes non-distributable blobs such as Windows base layers. 
+ SkipNonDistributable bool `protobuf:"varint,6,opt,name=skip_non_distributable,json=skipNonDistributable,proto3" json:"skip_non_distributable,omitempty"` +} + +func (x *ImageExportStream) Reset() { + *x = ImageExportStream{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageExportStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageExportStream) ProtoMessage() {} + +func (x *ImageExportStream) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageExportStream.ProtoReflect.Descriptor instead. +func (*ImageExportStream) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{1} +} + +func (x *ImageExportStream) GetStream() string { + if x != nil { + return x.Stream + } + return "" +} + +func (x *ImageExportStream) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ImageExportStream) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms + } + return nil +} + +func (x *ImageExportStream) GetAllPlatforms() bool { + if x != nil { + return x.AllPlatforms + } + return false +} + +func (x *ImageExportStream) GetSkipCompatibilityManifest() bool { + if x != nil { + return x.SkipCompatibilityManifest + } + return false +} + +func (x *ImageExportStream) GetSkipNonDistributable() bool { + if x != nil { + return x.SkipNonDistributable + } + return false +} + +var File_github_com_containerd_containerd_api_types_transfer_importexport_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x65, 0x72, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x71, + 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 
0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x9f, 0x02, 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 0x61, 0x6c, 0x6c, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x3e, 0x0a, + 0x1b, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, + 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x6e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = []interface{}{ + (*ImageImportStream)(nil), // 0: containerd.types.transfer.ImageImportStream 
+ (*ImageExportStream)(nil), // 1: containerd.types.transfer.ImageExportStream + (*types.Platform)(nil), // 2: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = []int32{ + 2, // 0: containerd.types.transfer.ImageExportStream.platforms:type_name -> containerd.types.Platform + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_importexport_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageImportStream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageExportStream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_importexport_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto new file mode 100644 index 0000000000..c18bae1c64 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +import "github.com/containerd/containerd/api/types/platform.proto"; + +message ImageImportStream { + // Stream is used to identify the binary input stream for the import operation. + // The stream uses the transfer binary stream protocol with the client as the sender. + // The binary data is expected to be a raw tar stream. + string stream = 1; + + string media_type = 2; + + bool force_compress = 3; +} + +message ImageExportStream { + // Stream is used to identify the binary output stream for the export operation. + // The stream uses the transfer binary stream protocol with the server as the sender. + // The binary data is expected to be a raw tar stream. + string stream = 1; + + string media_type = 2; + + // The specified platforms + repeated types.Platform platforms = 3; + // Whether to include all platforms + bool all_platforms = 4; + // Skips the creation of the Docker compatible manifest.json file + bool skip_compatibility_manifest = 5; + // Excludes non-distributable blobs such as Windows base layers. + bool skip_non_distributable = 6; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go new file mode 100644 index 0000000000..62d12b34db --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go @@ -0,0 +1,202 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/progress.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Progress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"` + Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` +} + +func (x *Progress) Reset() { + *x = Progress{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Progress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Progress) ProtoMessage() {} + +func (x *Progress) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Progress.ProtoReflect.Descriptor instead. +func (*Progress) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP(), []int{0} +} + +func (x *Progress) GetEvent() string { + if x != nil { + return x.Event + } + return "" +} + +func (x *Progress) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Progress) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} + +func (x *Progress) GetProgress() int64 { + if x != nil { + return x.Progress + } + return 0 +} + +func (x *Progress) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +var File_github_com_containerd_containerd_api_types_transfer_progress_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x22, + 0x80, 0x01, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 
0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = []interface{}{ + (*Progress)(nil), // 0: containerd.types.transfer.Progress +} +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_progress_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Progress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_progress_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto new file mode 100644 index 0000000000..3059bcbb12 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message Progress { + string event = 1; + string name = 2; + repeated string parents = 3; + int64 progress = 4; + int64 total = 5; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go new file mode 100644 index 0000000000..57bb80bae9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go @@ -0,0 +1,518 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/registry.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthType int32 + +const ( + AuthType_NONE AuthType = 0 + // CREDENTIALS is used to exchange username/password for access token + // using an oauth or "Docker Registry Token" server + AuthType_CREDENTIALS AuthType = 1 + // REFRESH is used to exchange secret for access token using an oauth + // or "Docker Registry Token" server + AuthType_REFRESH AuthType = 2 + // HEADER is used to set the HTTP Authorization header to secret + // directly for the registry. + // Value should be ` ` + AuthType_HEADER AuthType = 3 +) + +// Enum value maps for AuthType. 
+var ( + AuthType_name = map[int32]string{ + 0: "NONE", + 1: "CREDENTIALS", + 2: "REFRESH", + 3: "HEADER", + } + AuthType_value = map[string]int32{ + "NONE": 0, + "CREDENTIALS": 1, + "REFRESH": 2, + "HEADER": 3, + } +) + +func (x AuthType) Enum() *AuthType { + p := new(AuthType) + *p = x + return p +} + +func (x AuthType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuthType) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0].Descriptor() +} + +func (AuthType) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0] +} + +func (x AuthType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuthType.Descriptor instead. +func (AuthType) EnumDescriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0} +} + +type OCIRegistry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` + Resolver *RegistryResolver `protobuf:"bytes,2,opt,name=resolver,proto3" json:"resolver,omitempty"` +} + +func (x *OCIRegistry) Reset() { + *x = OCIRegistry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCIRegistry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCIRegistry) ProtoMessage() {} + +func (x *OCIRegistry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCIRegistry.ProtoReflect.Descriptor instead. +func (*OCIRegistry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0} +} + +func (x *OCIRegistry) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +func (x *OCIRegistry) GetResolver() *RegistryResolver { + if x != nil { + return x.Resolver + } + return nil +} + +type RegistryResolver struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // auth_stream is used to refer to a stream which auth callbacks may be + // made on. 
+ AuthStream string `protobuf:"bytes,1,opt,name=auth_stream,json=authStream,proto3" json:"auth_stream,omitempty"` + // Headers + Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RegistryResolver) Reset() { + *x = RegistryResolver{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegistryResolver) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegistryResolver) ProtoMessage() {} + +func (x *RegistryResolver) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegistryResolver.ProtoReflect.Descriptor instead. +func (*RegistryResolver) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{1} +} + +func (x *RegistryResolver) GetAuthStream() string { + if x != nil { + return x.AuthStream + } + return "" +} + +func (x *RegistryResolver) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +// AuthRequest is sent as a callback on a stream +type AuthRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // host is the registry host + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // reference is the namespace and repository name requested from the registry + Reference string `protobuf:"bytes,2,opt,name=reference,proto3" json:"reference,omitempty"` + // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry + Wwwauthenticate []string `protobuf:"bytes,3,rep,name=wwwauthenticate,proto3" json:"wwwauthenticate,omitempty"` +} + +func (x *AuthRequest) Reset() { + *x = AuthRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthRequest) ProtoMessage() {} + +func (x *AuthRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthRequest.ProtoReflect.Descriptor instead. 
+func (*AuthRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{2} +} + +func (x *AuthRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *AuthRequest) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +func (x *AuthRequest) GetWwwauthenticate() []string { + if x != nil { + return x.Wwwauthenticate + } + return nil +} + +type AuthResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthType AuthType `protobuf:"varint,1,opt,name=authType,proto3,enum=containerd.types.transfer.AuthType" json:"authType,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"` + ExpireAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_at,json=expireAt,proto3" json:"expire_at,omitempty"` // TODO: Stream error +} + +func (x *AuthResponse) Reset() { + *x = AuthResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthResponse) ProtoMessage() {} + +func (x *AuthResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthResponse.ProtoReflect.Descriptor instead. 
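The AuthRequest/AuthResponse pair above forms the callback protocol for registry authentication on a RegistryResolver auth stream: the request names the host, the reference, and the WWW-Authenticate challenges, and the response selects one of the AuthType strategies. The sketch below is illustrative only and not part of the vendored sources; the handler name, the registry host, the token realm, and the credentials are made up, and the wiring that actually receives requests from the stream is omitted.

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// answerAuthRequest is a hypothetical handler for AuthRequest callbacks
// arriving on a RegistryResolver auth stream: it answers with CREDENTIALS
// for a known host and NONE for everything else.
func answerAuthRequest(req *transfer.AuthRequest, user, pass string) *transfer.AuthResponse {
	if req.GetHost() != "registry.example.com" { // hypothetical registry host
		return &transfer.AuthResponse{AuthType: transfer.AuthType_NONE}
	}
	return &transfer.AuthResponse{
		AuthType: transfer.AuthType_CREDENTIALS,
		Username: user,
		Secret:   pass,
		// Optional expiry hint for the returned credentials.
		ExpireAt: timestamppb.New(time.Now().Add(30 * time.Minute)),
	}
}

func main() {
	req := &transfer.AuthRequest{
		Host:            "registry.example.com",
		Reference:       "library/hello-world",
		Wwwauthenticate: []string{`Bearer realm="https://auth.example.com/token"`},
	}
	resp := answerAuthRequest(req, "builder", "s3cret")
	fmt.Println(resp.GetAuthType(), resp.GetUsername())
}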
+func (*AuthResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{3} +} + +func (x *AuthResponse) GetAuthType() AuthType { + if x != nil { + return x.AuthType + } + return AuthType_NONE +} + +func (x *AuthResponse) GetSecret() string { + if x != nil { + return x.Secret + } + return "" +} + +func (x *AuthResponse) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *AuthResponse) GetExpireAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpireAt + } + return nil +} + +var File_github_com_containerd_containerd_api_types_transfer_registry_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x74, 0x0a, 0x0b, 0x4f, 0x43, 0x49, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69, 0x0a, 0x0b, + 
0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, + 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x2a, 0x3e, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45, + 0x41, 0x44, 0x45, 0x52, 0x10, 0x03, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var 
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = []interface{}{ + (AuthType)(0), // 0: containerd.types.transfer.AuthType + (*OCIRegistry)(nil), // 1: containerd.types.transfer.OCIRegistry + (*RegistryResolver)(nil), // 2: containerd.types.transfer.RegistryResolver + (*AuthRequest)(nil), // 3: containerd.types.transfer.AuthRequest + (*AuthResponse)(nil), // 4: containerd.types.transfer.AuthResponse + nil, // 5: containerd.types.transfer.RegistryResolver.HeadersEntry + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = []int32{ + 2, // 0: containerd.types.transfer.OCIRegistry.resolver:type_name -> containerd.types.transfer.RegistryResolver + 5, // 1: containerd.types.transfer.RegistryResolver.headers:type_name -> containerd.types.transfer.RegistryResolver.HeadersEntry + 0, // 2: containerd.types.transfer.AuthResponse.authType:type_name -> containerd.types.transfer.AuthType + 6, // 3: containerd.types.transfer.AuthResponse.expire_at:type_name -> google.protobuf.Timestamp + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_registry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCIRegistry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegistryResolver); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs, + EnumInfos: 
file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_registry_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto new file mode 100644 index 0000000000..0b3ce68b4c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto @@ -0,0 +1,79 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message OCIRegistry { + string reference = 1; + RegistryResolver resolver = 2; +} + +message RegistryResolver { + // auth_stream is used to refer to a stream which auth callbacks may be + // made on. + string auth_stream = 1; + + // Headers + map headers = 2; + + // Allow custom hosts dir? + // Force skip verify + // Force HTTP + // CA callback? Client TLS callback? +} + +// AuthRequest is sent as a callback on a stream +message AuthRequest { + // host is the registry host + string host = 1; + + // reference is the namespace and repository name requested from the registry + string reference = 2; + + // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry + repeated string wwwauthenticate = 3; +} + +enum AuthType { + NONE = 0; + + // CREDENTIALS is used to exchange username/password for access token + // using an oauth or "Docker Registry Token" server + CREDENTIALS = 1; + + // REFRESH is used to exchange secret for access token using an oauth + // or "Docker Registry Token" server + REFRESH = 2; + + // HEADER is used to set the HTTP Authorization header to secret + // directly for the registry. + // Value should be ` ` + HEADER = 3; +} + +message AuthResponse { + AuthType authType = 1; + string secret = 2; + string username = 3; + google.protobuf.Timestamp expire_at = 4; + // TODO: Stream error +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go new file mode 100644 index 0000000000..bdfa8d5d24 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go @@ -0,0 +1,226 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/streaming.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Data) Reset() { + *x = Data{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Data) ProtoMessage() {} + +func (x *Data) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Data.ProtoReflect.Descriptor instead. +func (*Data) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{0} +} + +func (x *Data) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type WindowUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Update int32 `protobuf:"varint,1,opt,name=update,proto3" json:"update,omitempty"` +} + +func (x *WindowUpdate) Reset() { + *x = WindowUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowUpdate) ProtoMessage() {} + +func (x *WindowUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowUpdate.ProtoReflect.Descriptor instead. 
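The streaming types in this file are deliberately small: Data carries raw payload chunks, and WindowUpdate appears to be the receiver's flow-control credit (the crediting policy lives in the streaming transport, not in these types). As a rough sketch only, assuming a sender simply splits its payload into bounded Data frames, the following hypothetical chunkIntoData helper turns an io.Reader into marshalled Data messages; the chunk size is arbitrary and frame delivery is not modelled.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/containerd/containerd/api/types/transfer"
	"google.golang.org/protobuf/proto"
)

// chunkIntoData (a hypothetical helper) splits a byte stream into
// transfer.Data messages of at most chunkSize bytes and returns their wire
// encodings. Delivery of the frames, and WindowUpdate crediting by the
// receiver, belong to the streaming transport and are not modelled here.
func chunkIntoData(r io.Reader, chunkSize int) ([][]byte, error) {
	var frames [][]byte
	buf := make([]byte, chunkSize)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			// Copy the chunk: buf is reused on the next iteration.
			msg := &transfer.Data{Data: append([]byte(nil), buf[:n]...)}
			b, merr := proto.Marshal(msg)
			if merr != nil {
				return nil, merr
			}
			frames = append(frames, b)
		}
		if err == io.EOF {
			return frames, nil
		}
		if err != nil {
			return nil, err
		}
	}
}

func main() {
	frames, err := chunkIntoData(bytes.NewReader(make([]byte, 100_000)), 32*1024)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("frames:", len(frames)) // 100 kB at 32 KiB per frame -> 4 frames
}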
+func (*WindowUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{1} +} + +func (x *WindowUpdate) GetUpdate() int32 { + if x != nil { + return x.Update + } + return 0 +} + +var File_github_com_containerd_containerd_api_types_transfer_streaming_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, + 0x22, 0x1a, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x0c, + 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = []interface{}{ + (*Data)(nil), // 0: containerd.types.transfer.Data + (*WindowUpdate)(nil), // 1: containerd.types.transfer.WindowUpdate +} +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() { + if 
File_github_com_containerd_containerd_api_types_transfer_streaming_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WindowUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_streaming_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto new file mode 100644 index 0000000000..234956c2c2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message Data { + bytes data = 1; +} + +message WindowUpdate { + int32 update = 1; +} diff --git a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go similarity index 68% rename from vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go index c7cb0c0343..3516494ac0 100644 --- a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go @@ -1,3 +1,5 @@ +//go:build gofuzz + /* Copyright The containerd Authors. @@ -14,17 +16,13 @@ limitations under the License. 
*/ -package mount +package compression import ( - _ "unsafe" // required for go:linkname. + "bytes" ) -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +func FuzzDecompressStream(data []byte) int { + _, _ = DecompressStream(bytes.NewReader(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go b/vendor/github.com/containerd/containerd/archive/link_default.go similarity index 68% rename from vendor/github.com/containerd/containerd/sys/userns_deprecated.go rename to vendor/github.com/containerd/containerd/archive/link_default.go index 53acf55477..84a8997eb5 100644 --- a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go +++ b/vendor/github.com/containerd/containerd/archive/link_default.go @@ -1,3 +1,5 @@ +//go:build !freebsd + /* Copyright The containerd Authors. @@ -14,10 +16,10 @@ limitations under the License. */ -package sys +package archive -import "github.com/containerd/containerd/pkg/userns" +import "os" -// RunningInUserNS detects whether we are currently running in a user namespace. -// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. -var RunningInUserNS = userns.RunningInUserNS +func link(oldname, newname string) error { + return os.Link(oldname, newname) +} diff --git a/vendor/github.com/containerd/containerd/archive/link_freebsd.go b/vendor/github.com/containerd/containerd/archive/link_freebsd.go new file mode 100644 index 0000000000..2097db06bc --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/link_freebsd.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +func link(oldname, newname string) error { + e := ignoringEINTR(func() error { + return unix.Linkat(unix.AT_FDCWD, oldname, unix.AT_FDCWD, newname, 0) + }) + if e != nil { + return &os.LinkError{Op: "link", Old: oldname, New: newname, Err: e} + } + return nil +} + +// The following contents were copied from Go 1.18.2. +// Use of this source code is governed by the following +// BSD-style license: +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// ignoringEINTR makes a function call and repeats it if it returns an +// EINTR error. This appears to be required even though we install all +// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846. +// Also #20400 and #36644 are issues in which a signal handler is +// installed without setting SA_RESTART. None of these are the common case, +// but there are enough of them that it seems that we can't avoid +// an EINTR loop. +func ignoringEINTR(fn func() error) error { + for { + err := fn() + if err != syscall.EINTR { + return err + } + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 2023db3870..28b623dcf2 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -24,7 +24,6 @@ import ( "io" "os" "path/filepath" - "runtime" "strings" "sync" "syscall" @@ -32,6 +31,7 @@ import ( "github.com/containerd/containerd/archive/tarheader" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/epoch" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" ) @@ -52,11 +52,11 @@ var errInvalidArchive = errors.New("invalid archive") // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. // See https://github.com/opencontainers/image-spec/blob/main/layer.md -func Diff(ctx context.Context, a, b string) io.ReadCloser { +func Diff(ctx context.Context, a, b string, opts ...WriteDiffOpt) io.ReadCloser { r, w := io.Pipe() go func() { - err := WriteDiff(ctx, w, a, b) + err := WriteDiff(ctx, w, a, b, opts...) if err != nil { log.G(ctx).WithError(err).Debugf("write diff failed") } @@ -82,6 +82,10 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO return fmt.Errorf("failed to apply option: %w", err) } } + if tm := epoch.FromContext(ctx); tm != nil && options.SourceDateEpoch == nil { + options.SourceDateEpoch = tm + } + if options.writeDiffFunc == nil { options.writeDiffFunc = writeDiffNaive } @@ -96,8 +100,14 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. 
// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error { - cw := NewChangeWriter(w, b) +func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, o WriteDiffOptions) error { + var opts []ChangeWriterOpt + if o.SourceDateEpoch != nil { + opts = append(opts, + WithModTimeUpperBound(*o.SourceDateEpoch), + WithWhiteoutTime(*o.SourceDateEpoch)) + } + cw := NewChangeWriter(w, b, opts...) err := fs.Changes(ctx, a, b, cw.HandleChange) if err != nil { return fmt.Errorf("failed to create diff tar stream: %w", err) @@ -291,7 +301,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti srcData := io.Reader(tr) srcHdr := hdr - if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil { + if err := createTarFile(ctx, path, root, srcHdr, srcData, options.NoSameOwner); err != nil { return 0, err } @@ -316,7 +326,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti return size, nil } -func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error { +func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader, noSameOwner bool) error { // hdr.Mode is in linux format, which we can use for syscalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -365,7 +375,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return err } - if err := os.Link(targetPath, path); err != nil { + if err := link(targetPath, path); err != nil { return err } @@ -382,8 +392,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - // Lchown is not supported on Windows. - if runtime.GOOS != "windows" { + if !noSameOwner { if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err) if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { @@ -493,26 +502,48 @@ func mkparent(ctx context.Context, path, root string, parents []string) error { // See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details // about OCI layers type ChangeWriter struct { - tw *tar.Writer - source string - whiteoutT time.Time - inodeSrc map[uint64]string - inodeRefs map[uint64][]string - addedDirs map[string]struct{} + tw *tar.Writer + source string + modTimeUpperBound *time.Time + whiteoutT time.Time + inodeSrc map[uint64]string + inodeRefs map[uint64][]string + addedDirs map[string]struct{} +} + +// ChangeWriterOpt can be specified in NewChangeWriter. +type ChangeWriterOpt func(cw *ChangeWriter) + +// WithModTimeUpperBound sets the mod time upper bound. +func WithModTimeUpperBound(tm time.Time) ChangeWriterOpt { + return func(cw *ChangeWriter) { + cw.modTimeUpperBound = &tm + } +} + +// WithWhiteoutTime sets the whiteout timestamp. +func WithWhiteoutTime(tm time.Time) ChangeWriterOpt { + return func(cw *ChangeWriter) { + cw.whiteoutT = tm + } } // NewChangeWriter returns ChangeWriter that writes tar stream of the source directory // to the privided writer. Change information (add/modify/delete/unmodified) for each // file needs to be passed through HandleChange method. 
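The hunk above threads a SOURCE_DATE_EPOCH-style timestamp through WriteDiff: when set, writeDiffNaive hands it to NewChangeWriter as both a mod-time upper bound and the whiteout timestamp, which is what makes the produced layer tars reproducible. A minimal sketch of driving ChangeWriter directly with those options follows; the lower/upper paths and the output file are hypothetical, and it assumes ChangeWriter's usual Close method (defined elsewhere in this package) to flush the tar stream.

package main

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/containerd/containerd/archive"
	"github.com/containerd/continuity/fs"
)

func main() {
	// Hypothetical snapshot mount points and output path.
	lower, upper := "/tmp/lower", "/tmp/upper"

	out, err := os.Create("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Pin both the mod-time upper bound and the whiteout timestamp to a fixed
	// epoch so repeated runs over identical content produce identical tars.
	epoch := time.Unix(0, 0).UTC()
	cw := archive.NewChangeWriter(out, upper,
		archive.WithModTimeUpperBound(epoch),
		archive.WithWhiteoutTime(epoch),
	)
	if err := fs.Changes(context.Background(), lower, upper, cw.HandleChange); err != nil {
		log.Fatal(err)
	}
	if err := cw.Close(); err != nil {
		log.Fatal(err)
	}
}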
-func NewChangeWriter(w io.Writer, source string) *ChangeWriter { - return &ChangeWriter{ +func NewChangeWriter(w io.Writer, source string, opts ...ChangeWriterOpt) *ChangeWriter { + cw := &ChangeWriter{ tw: tar.NewWriter(w), source: source, - whiteoutT: time.Now(), + whiteoutT: time.Now(), // can be overridden with WithWhiteoutTime(time.Time) ChangeWriterOpt . inodeSrc: map[uint64]string{}, inodeRefs: map[uint64][]string{}, addedDirs: map[string]struct{}{}, } + for _, o := range opts { + o(cw) + } + return cw } // HandleChange receives filesystem change information and reflect that information to @@ -566,6 +597,9 @@ func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e // truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead hdr.Format = tar.FormatPAX + if cw.modTimeUpperBound != nil && hdr.ModTime.After(*cw.modTimeUpperBound) { + hdr.ModTime = *cw.modTimeUpperBound + } hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} @@ -577,11 +611,9 @@ func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return fmt.Errorf("failed to make path relative: %w", err) } } - name, err = tarName(name) - if err != nil { - return fmt.Errorf("cannot canonicalize path: %w", err) - } - // suffix with '/' for directories + // Canonicalize to POSIX-style paths using forward slashes. Directory + // entries must end with a slash. + name = filepath.ToSlash(name) if f.IsDir() && !strings.HasSuffix(name, "/") { name += "/" } diff --git a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go b/vendor/github.com/containerd/containerd/archive/tar_mostunix.go index d2d970356c..ca5e602a03 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_mostunix.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd -// +build !windows,!freebsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go index 58985555a5..ac3a03c8d4 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts.go @@ -20,6 +20,7 @@ import ( "archive/tar" "context" "io" + "time" ) // ApplyOptions provides additional options for an Apply operation @@ -27,6 +28,7 @@ type ApplyOptions struct { Filter Filter // Filter tar headers ConvertWhiteout ConvertWhiteout // Convert whiteout files Parents []string // Parent directories to handle inherited attributes without CoW + NoSameOwner bool // NoSameOwner will not attempt to preserve the owner specified in the tar archive. applyFunc func(context.Context, string, io.Reader, ApplyOptions) (int64, error) } @@ -61,6 +63,15 @@ func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt { } } +// WithNoSameOwner is same as '--no-same-owner` in 'tar' command. +// It'll skip attempt to preserve the owner specified in the tar archive. +func WithNoSameOwner() ApplyOpt { + return func(options *ApplyOptions) error { + options.NoSameOwner = true + return nil + } +} + // WithParents provides parent directories for resolving inherited attributes // directory from the filesystem. 
// Inherited attributes are searched from first to last, making the first @@ -79,7 +90,22 @@ type WriteDiffOptions struct { ParentLayers []string // Windows needs the full list of parent layers writeDiffFunc func(context.Context, io.Writer, string, string, WriteDiffOptions) error + + // SourceDateEpoch specifies the following timestamps to provide control for reproducibility. + // - The upper bound timestamp of the diff contents + // - The timestamp of the whiteouts + // + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + SourceDateEpoch *time.Time } // WriteDiffOpt allows setting mutable archive write properties on creation type WriteDiffOpt func(options *WriteDiffOptions) error + +// WithSourceDateEpoch specifies the SOURCE_DATE_EPOCH without touching the env vars. +func WithSourceDateEpoch(tm *time.Time) WriteDiffOpt { + return func(options *WriteDiffOptions) error { + options.SourceDateEpoch = tm + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index d84dfd8c07..8e883eee62 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -34,10 +33,6 @@ import ( "golang.org/x/sys/unix" ) -func tarName(p string) (string, error) { - return p, nil -} - func chmodTarEntry(perm os.FileMode) os.FileMode { return perm } diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index 4b71c1e307..dc8f64ee41 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -23,24 +23,9 @@ import ( "os" "strings" - "github.com/containerd/containerd/sys" + "github.com/moby/sys/sequential" ) -// tarName returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func tarName(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("windows path contains forward slash: %s", p) - } - - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil -} - // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { @@ -57,15 +42,15 @@ func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error { } func open(p string) (*os.File, error) { - // We use sys.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - return sys.OpenSequential(p) + // We use sequential file access to avoid depleting the standby list on + // Windows. + return sequential.Open(p) } func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { - // Source is regular file. We use sys.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - return sys.OpenFileSequential(name, flag, perm) + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. 
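Stepping back to the ApplyOptions addition a little earlier: WithNoSameOwner is the programmatic counterpart of `tar --no-same-owner`, skipping the Lchown of extracted entries. A small usage sketch, assuming the package's existing archive.Apply entry point (not shown in these hunks) and hypothetical input and output paths:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd/archive"
)

func main() {
	// Hypothetical layer tarball and extraction root.
	f, err := os.Open("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// WithNoSameOwner skips chown of extracted entries, which is useful when
	// unpacking as an unprivileged user or inside a user namespace.
	if _, err := archive.Apply(context.Background(), "/tmp/rootfs", f, archive.WithNoSameOwner()); err != nil {
		log.Fatal(err)
	}
}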
+ return sequential.OpenFile(name, flag, perm) } func mkdir(path string, perm os.FileMode) error { diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index 043e374538..b5a10d528f 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/archive/time_windows.go b/vendor/github.com/containerd/containerd/archive/time_windows.go index 71f397821a..d906a3e5e7 100644 --- a/vendor/github.com/containerd/containerd/archive/time_windows.go +++ b/vendor/github.com/containerd/containerd/archive/time_windows.go @@ -25,18 +25,17 @@ import ( // chtimes will set the create time on a file using the given modtime. // This requires calling SetFileTime and explicitly including the create time. func chtimes(path string, atime, mtime time.Time) error { - ctimespec := windows.NsecToTimespec(mtime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return err } - h, e := windows.CreateFile(pathp, + h, err := windows.CreateFile(pathp, windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e + if err != nil { + return err } defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + c := windows.NsecToFiletime(mtime.UnixNano()) return windows.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 8ee13edda4..11724f8d88 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,7 +18,6 @@ package cio import ( "context" - "errors" "fmt" "io" "net/url" @@ -167,6 +166,15 @@ func NewAttach(opts ...Opt) Attach { if fifos == nil { return nil, fmt.Errorf("cannot attach, missing fifos") } + if streams.Stdin == nil { + fifos.Stdin = "" + } + if streams.Stdout == nil { + fifos.Stdout = "" + } + if streams.Stderr == nil { + fifos.Stderr = "" + } return copyIO(fifos, streams) } } @@ -302,14 +310,21 @@ func LogFile(path string) Creator { // LogURIGenerator is the helper to generate log uri with specific scheme. func LogURIGenerator(scheme string, path string, args map[string]string) (*url.URL, error) { path = filepath.Clean(path) - if !strings.HasPrefix(path, "/") { - return nil, errors.New("absolute path needed") + if !filepath.IsAbs(path) { + return nil, fmt.Errorf("%q must be absolute", path) } - uri := &url.URL{ - Scheme: scheme, - Path: path, + // Without adding / here, C:\foo\bar.txt will become file://C:/foo/bar.txt + // which is invalid. The path must have three slashes. + // + // https://learn.microsoft.com/en-us/archive/blogs/ie/file-uris-in-windows + // > In the case of a local Windows file path, there is no hostname, + // > and thus another slash and the path immediately follow. 
+ p := filepath.ToSlash(path) + if !strings.HasPrefix(path, "/") { + p = "/" + p } + uri := &url.URL{Scheme: scheme, Path: p} if len(args) == 0 { return uri, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 5606cc88a9..9dc21dcc88 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -99,7 +98,14 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { config: fifos.Config, wg: wg, closers: append(pipes.closers(), fifos), - cancel: cancel, + cancel: func() { + cancel() + for _, c := range pipes.closers() { + if c != nil { + c.Close() + } + } + }, }, nil } diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 1c2202e1ec..a62217b961 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -35,6 +35,7 @@ import ( introspectionapi "github.com/containerd/containerd/api/services/introspection/v1" leasesapi "github.com/containerd/containerd/api/services/leases/v1" namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1" + sandboxsapi "github.com/containerd/containerd/api/services/sandbox/v1" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/services/tasks/v1" versionservice "github.com/containerd/containerd/api/services/version/v1" @@ -52,15 +53,17 @@ import ( "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/sandbox" + sandboxproxy "github.com/containerd/containerd/sandbox/proxy" "github.com/containerd/containerd/services/introspection" "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" - "github.com/containerd/typeurl" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -185,6 +188,12 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } + if copts.defaultPlatform != nil { + c.platform = copts.defaultPlatform + } else { + c.platform = platforms.Default() + } + // check namespace labels for default runtime if copts.defaultRuntime == "" && c.defaultns != "" { if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { @@ -346,6 +355,8 @@ type RemoteContext struct { // ConvertSchema1 is whether to convert Docker registry schema 1 // manifests. If this option is false then any image which resolves // to schema 1 will return an error since schema 1 is not supported. + // + // Deprecated: use Schema 2 or OCI images. ConvertSchema1 bool // Platforms defines which platforms to handle when doing the image operation. 
@@ -621,6 +632,11 @@ func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { return snproxy.NewSnapshotter(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName) } +// DefaultNamespace return the default namespace +func (c *Client) DefaultNamespace() string { + return c.defaultns +} + // TaskService returns the underlying TasksClient func (c *Client) TaskService() tasks.TasksClient { if c.taskService != nil { @@ -688,6 +704,26 @@ func (c *Client) EventService() EventService { return NewEventServiceFromClient(eventsapi.NewEventsClient(c.conn)) } +// SandboxStore returns the underlying sandbox store client +func (c *Client) SandboxStore() sandbox.Store { + if c.sandboxStore != nil { + return c.sandboxStore + } + c.connMu.Lock() + defer c.connMu.Unlock() + return sandboxproxy.NewSandboxStore(sandboxsapi.NewStoreClient(c.conn)) +} + +// SandboxController returns the underlying sandbox controller client +func (c *Client) SandboxController() sandbox.Controller { + if c.sandboxController != nil { + return c.sandboxController + } + c.connMu.Lock() + defer c.connMu.Unlock() + return sandboxproxy.NewSandboxController(sandboxsapi.NewControllerClient(c.conn)) +} + // VersionService returns the underlying VersionClient func (c *Client) VersionService() versionservice.VersionClient { c.connMu.Lock() @@ -819,7 +855,7 @@ func (c *Client) GetSnapshotterSupportedPlatforms(ctx context.Context, snapshott return platforms.Any(snPlatforms...), nil } -func toPlatforms(pt []apitypes.Platform) []ocispec.Platform { +func toPlatforms(pt []*apitypes.Platform) []ocispec.Platform { platforms := make([]ocispec.Platform, len(pt)) for i, p := range pt { platforms[i] = ocispec.Platform{ @@ -830,3 +866,21 @@ func toPlatforms(pt []apitypes.Platform) []ocispec.Platform { } return platforms } + +// GetSnapshotterCapabilities returns the capabilities of a snapshotter. +func (c *Client) GetSnapshotterCapabilities(ctx context.Context, snapshotterName string) ([]string, error) { + filters := []string{fmt.Sprintf("type==%s, id==%s", plugin.SnapshotPlugin, snapshotterName)} + in := c.IntrospectionService() + + resp, err := in.Plugins(ctx, filters) + if err != nil { + return nil, err + } + + if len(resp.Plugins) <= 0 { + return nil, fmt.Errorf("inspection service could not find snapshotter %s plugin", snapshotterName) + } + + sn := resp.Plugins[0] + return sn.Capabilities, nil +} diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index 2ef7575d8a..4e0a78a8a3 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -200,6 +200,8 @@ func WithChildLabelMap(fn func(ocispec.Descriptor) []string) RemoteOpt { // WithSchema1Conversion is used to convert Docker registry schema 1 // manifests to oci manifests on pull. Without this option schema 1 // manifests will return a not supported error. +// +// Deprecated: use Schema 2 or OCI images. 
func WithSchema1Conversion(client *Client, c *RemoteContext) error { c.ConvertSchema1 = true return nil diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 2cf15666f1..7863b742bc 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -32,10 +32,10 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/fifo" - "github.com/containerd/typeurl" - prototypes "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ver "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" @@ -74,7 +74,7 @@ type Container interface { // SetLabels sets the provided labels for the container and returns the final label set SetLabels(context.Context, map[string]string) (map[string]string, error) // Extensions returns the extensions set on the container - Extensions(context.Context) (map[string]prototypes.Any, error) + Extensions(context.Context) (map[string]typeurl.Any, error) // Update a container Update(context.Context, ...UpdateContainerOpts) error // Checkpoint creates a checkpoint image of the current container @@ -120,7 +120,7 @@ func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Cont return c.metadata, nil } -func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) { +func (c *container) Extensions(ctx context.Context) (map[string]typeurl.Any, error) { r, err := c.get(ctx) if err != nil { return nil, err @@ -163,7 +163,7 @@ func (c *container) Spec(ctx context.Context) (*oci.Spec, error) { return nil, err } var s oci.Spec - if err := json.Unmarshal(r.Spec.Value, &s); err != nil { + if err := json.Unmarshal(r.Spec.GetValue(), &s); err != nil { return nil, err } return &s, nil @@ -258,6 +258,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N request.Rootfs = append(request.Rootfs, &types.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, }) } @@ -275,6 +276,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N request.Rootfs = append(request.Rootfs, &types.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, }) } @@ -285,7 +287,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N if err != nil { return nil, err } - request.Options = any + request.Options = protobuf.FromAny(any) } t := &task{ client: c.client, @@ -397,7 +399,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er return nil, err } var i cio.IO - if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown { + if ioAttach != nil && response.Process.Status != tasktypes.Status_UNKNOWN { // Do not attach IO for task in unknown state, because there // are no fifo paths anyway. 
if i, err = attachExistingIO(response, ioAttach); err != nil { diff --git a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go index a64ef618ba..64f23823d2 100644 --- a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go +++ b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go @@ -28,9 +28,11 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/containerd/typeurl" + "github.com/opencontainers/go-digest" imagespec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -56,7 +58,7 @@ func WithCheckpointImage(ctx context.Context, client *Client, c *containers.Cont // WithCheckpointTask includes the running task func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error { - any, err := typeurl.MarshalAny(copts) + any, err := protobuf.MarshalAnyToProto(copts) if err != nil { return nil } @@ -71,14 +73,14 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta platformSpec := platforms.DefaultSpec() index.Manifests = append(index.Manifests, imagespec.Descriptor{ MediaType: d.MediaType, - Size: d.Size_, - Digest: d.Digest, + Size: d.Size, + Digest: digest.Digest(d.Digest), Platform: &platformSpec, Annotations: d.Annotations, }) } // save copts - data, err := any.Marshal() + data, err := proto.Marshal(any) if err != nil { return err } @@ -97,8 +99,9 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta // WithCheckpointRuntime includes the container runtime info func WithCheckpointRuntime(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error { - if c.Runtime.Options != nil { - data, err := c.Runtime.Options.Marshal() + if c.Runtime.Options != nil && c.Runtime.Options.GetValue() != nil { + any := protobuf.FromAny(c.Runtime.Options) + data, err := proto.Marshal(any) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 4d630ea6c9..4a937032f5 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -26,10 +26,11 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf" "github.com/containerd/containerd/snapshots" - "github.com/containerd/typeurl" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" "github.com/opencontainers/image-spec/identity" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -57,7 +58,7 @@ type InfoConfig struct { func WithRuntime(name string, options interface{}) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { var ( - any *types.Any + any typeurl.Any err error ) if options != nil { @@ -74,6 +75,15 @@ func WithRuntime(name string, options interface{}) NewContainerOpts 
{ } } +// WithSandbox joins the container to a container group (aka sandbox) from the given ID +// Note: shim runtime must support sandboxes environments. +func WithSandbox(sandboxID string) NewContainerOpts { + return func(ctx context.Context, client *Client, c *containers.Container) error { + c.SandboxID = sandboxID + return nil + } +} + // WithImage sets the provided image as the base for the container func WithImage(i Image) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { @@ -214,6 +224,11 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts if err != nil { return err } + + parent, err = resolveSnapshotOptions(ctx, client, c.Snapshotter, s, parent, opts...) + if err != nil { + return err + } if _, err := s.Prepare(ctx, id, parent, opts...); err != nil { return err } @@ -258,6 +273,11 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer if err != nil { return err } + + parent, err = resolveSnapshotOptions(ctx, client, c.Snapshotter, s, parent, opts...) + if err != nil { + return err + } if _, err := s.View(ctx, id, parent, opts...); err != nil { return err } @@ -288,9 +308,9 @@ func WithContainerExtension(name string, extension interface{}) NewContainerOpts } if c.Extensions == nil { - c.Extensions = make(map[string]types.Any) + c.Extensions = make(map[string]typeurl.Any) } - c.Extensions[name] = *any + c.Extensions[name] = any return nil } } @@ -298,6 +318,9 @@ func WithContainerExtension(name string, extension interface{}) NewContainerOpts // WithNewSpec generates a new spec for a new container func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { + if _, ok := namespaces.Namespace(ctx); !ok { + ctx = namespaces.WithNamespace(ctx, client.DefaultNamespace()) + } s, err := oci.GenerateSpec(ctx, client, c, opts...) if err != nil { return err @@ -315,7 +338,7 @@ func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts { } var err error - c.Spec, err = typeurl.MarshalAny(s) + c.Spec, err = protobuf.MarshalAnyToProto(s) return err } } diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index b6fc37db92..016c1a9258 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/container_restore_opts.go b/vendor/github.com/containerd/containerd/container_restore_opts.go index bdc8650cda..2afc187013 100644 --- a/vendor/github.com/containerd/containerd/container_restore_opts.go +++ b/vendor/github.com/containerd/containerd/container_restore_opts.go @@ -24,8 +24,8 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/gogo/protobuf/proto" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/proto" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/opencontainers/image-spec/identity" imagespec "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containerd/containerd/containerd.service b/vendor/github.com/containerd/containerd/containerd.service index e4c082b3a4..38a3459456 100644 --- a/vendor/github.com/containerd/containerd/containerd.service +++ b/vendor/github.com/containerd/containerd/containerd.service @@ -18,6 +18,8 @@ Documentation=https://containerd.io After=network.target local-fs.target [Service] +#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration +#Environment="ENABLE_CRI_SANDBOXES=sandboxed" ExecStartPre=-/sbin/modprobe overlay ExecStart=/usr/local/bin/containerd diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go index 7174bbd6aa..49f3891336 100644 --- a/vendor/github.com/containerd/containerd/containers/containers.go +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -20,7 +20,7 @@ import ( "context" "time" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ) // Container represents the set of data pinned by a container. Unless otherwise @@ -53,7 +53,7 @@ type Container struct { // container. // // This field is required but mutable. - Spec *types.Any + Spec typeurl.Any // SnapshotKey specifies the snapshot key to use for the container's root // filesystem. When starting a task from this container, a caller should @@ -75,13 +75,18 @@ type Container struct { UpdatedAt time.Time // Extensions stores client-specified metadata - Extensions map[string]types.Any + Extensions map[string]typeurl.Any + + // SandboxID is an identifier of sandbox this container belongs to. + // + // This property is optional, but can't be changed after creation. 
+ SandboxID string } // RuntimeInfo holds runtime specific information type RuntimeInfo struct { Name string - Options *types.Any + Options typeurl.Any } // Store interacts with the underlying container storage diff --git a/vendor/github.com/containerd/containerd/containerstore.go b/vendor/github.com/containerd/containerd/containerstore.go index bdd1c6066a..331a6f41de 100644 --- a/vendor/github.com/containerd/containerd/containerstore.go +++ b/vendor/github.com/containerd/containerd/containerstore.go @@ -24,7 +24,9 @@ import ( containersapi "github.com/containerd/containerd/api/services/containers/v1" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -50,7 +52,7 @@ func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Conta return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&resp.Container), nil + return containerFromProto(resp.Container), nil } func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) { @@ -114,7 +116,7 @@ func (r *remoteContainers) Create(ctx context.Context, container containers.Cont return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&created.Container), nil + return containerFromProto(created.Container), nil } @@ -134,7 +136,7 @@ func (r *remoteContainers) Update(ctx context.Context, container containers.Cont return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&updated.Container), nil + return containerFromProto(updated.Container), nil } @@ -147,19 +149,24 @@ func (r *remoteContainers) Delete(ctx context.Context, id string) error { } -func containerToProto(container *containers.Container) containersapi.Container { - return containersapi.Container{ +func containerToProto(container *containers.Container) *containersapi.Container { + extensions := make(map[string]*ptypes.Any) + for k, v := range container.Extensions { + extensions[k] = protobuf.FromAny(v) + } + return &containersapi.Container{ ID: container.ID, Labels: container.Labels, Image: container.Image, Runtime: &containersapi.Container_Runtime{ Name: container.Runtime.Name, - Options: container.Runtime.Options, + Options: protobuf.FromAny(container.Runtime.Options), }, - Spec: container.Spec, + Spec: protobuf.FromAny(container.Spec), Snapshotter: container.Snapshotter, SnapshotKey: container.SnapshotKey, - Extensions: container.Extensions, + Extensions: extensions, + Sandbox: container.SandboxID, } } @@ -171,6 +178,11 @@ func containerFromProto(containerpb *containersapi.Container) containers.Contain Options: containerpb.Runtime.Options, } } + extensions := make(map[string]typeurl.Any) + for k, v := range containerpb.Extensions { + v := v + extensions[k] = v + } return containers.Container{ ID: containerpb.ID, Labels: containerpb.Labels, @@ -179,18 +191,19 @@ func containerFromProto(containerpb *containersapi.Container) containers.Contain Spec: containerpb.Spec, Snapshotter: containerpb.Snapshotter, SnapshotKey: containerpb.SnapshotKey, - CreatedAt: containerpb.CreatedAt, - UpdatedAt: containerpb.UpdatedAt, - Extensions: containerpb.Extensions, + CreatedAt: protobuf.FromTimestamp(containerpb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(containerpb.UpdatedAt), + 
Extensions: extensions, + SandboxID: containerpb.Sandbox, } } -func containersFromProto(containerspb []containersapi.Container) []containers.Container { +func containersFromProto(containerspb []*containersapi.Container) []containers.Container { var containers []containers.Container for _, container := range containerspb { container := container - containers = append(containers, containerFromProto(&container)) + containers = append(containers, containerFromProto(container)) } return containers diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index ff17a8417b..8eb1a16924 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -25,6 +25,26 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +// +// Overall content lifecycle: +// - Ingester is used to initiate a write operation (aka ingestion) +// - IngestManager is used to manage (e.g. list, abort) active ingestions +// - Once an ingestion is complete (see Writer.Commit), Provider is used to +// query a single piece of content by its digest +// - Manager is used to manage (e.g. list, delete) previously committed content +// +// Note that until ingestion is complete, its content is not visible through +// Provider or Manager. Once ingestion is complete, it is no longer exposed +// through IngestManager. +type Store interface { + Manager + Provider + IngestManager + Ingester +} + // ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer type ReaderAt interface { io.ReaderAt @@ -42,14 +62,31 @@ type Provider interface { // Ingester writes content type Ingester interface { - // Some implementations require WithRef to be included in opts. + // Writer initiates a writing operation (aka ingestion). A single ingestion + // is uniquely identified by its ref, provided using a WithRef option. + // Writer can be called multiple times with the same ref to access the same + // ingestion. + // Once all the data is written, use Writer.Commit to complete the ingestion. Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) } +// IngestManager provides methods for managing ingestions. An ingestion is a +// not-yet-complete writing operation initiated using Ingester and identified +// by a ref string. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match + // the provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + // Info holds content specific information -// -// TODO(stevvooe): Consider a very different name for this struct. Info is way -// to general. It also reads very weird in certain context, like pluralization. type Info struct { Digest digest.Digest Size int64 @@ -58,7 +95,7 @@ type Info struct { Labels map[string]string } -// Status of a content operation +// Status of a content operation (i.e. 
an ingestion) type Status struct { Ref string Offset int64 @@ -71,12 +108,17 @@ type Status struct { // WalkFunc defines the callback for a blob walk. type WalkFunc func(Info) error -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { +// InfoProvider provides info for content inspection. +type InfoProvider interface { // Info will return metadata about content available in the content store. // // If the content is not present, ErrNotFound will be returned. Info(ctx context.Context, dgst digest.Digest) (Info, error) +} + +// Manager provides methods for inspecting, listing and removing content. +type Manager interface { + InfoProvider // Update updates mutable information related to content. // If one or more fieldpaths are provided, only those @@ -94,21 +136,7 @@ type Manager interface { Delete(ctx context.Context, dgst digest.Digest) error } -// IngestManager provides methods for managing ingests. -type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match the - // provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Writer handles the write of content into a content store +// Writer handles writing of content into a content store type Writer interface { // Close closes the writer, if the writer has not been // committed this allows resuming or aborting. @@ -131,15 +159,6 @@ type Writer interface { Truncate(size int64) error } -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - // Opt is used to alter the mutable properties of content type Opt func(*Info) error diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 23ddc452f8..5404109a6d 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -43,16 +43,26 @@ var bufPool = sync.Pool{ }, } +type reader interface { + Reader() io.Reader +} + // NewReader returns a io.Reader from a ReaderAt func NewReader(ra ReaderAt) io.Reader { - rd := io.NewSectionReader(ra, 0, ra.Size()) - return rd + if rd, ok := ra.(reader); ok { + return rd.Reader() + } + return io.NewSectionReader(ra, 0, ra.Size()) } // ReadBlob retrieves the entire contents of the blob from the provider. // // Avoid using this for large blobs, such as layers. func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return desc.Data, nil + } + ra, err := provider.ReaderAt(ctx, desc) if err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go new file mode 100644 index 0000000000..a523f28d91 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go @@ -0,0 +1,76 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "bufio" + "bytes" + "context" + _ "crypto/sha256" + "io" + "testing" + + "github.com/opencontainers/go-digest" + + "github.com/containerd/containerd/content" +) + +func FuzzContentStoreWriter(data []byte) int { + t := &testing.T{} + ctx := context.Background() + ctx, _, cs, cleanup := contentStoreEnv(t) + defer cleanup() + + cw, err := cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + if err := cw.Close(); err != nil { + return 0 + } + + // reopen, so we can test things + cw, err = cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + + err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) + if err != nil { + return 0 + } + expected := digest.FromBytes(data) + + if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { + return 0 + } + return 1 +} + +func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { + nn, err := io.Copy(dst, src) + if err != nil { + return err + } + + if nn != size { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go index a83c171bbd..899e85c0ba 100644 --- a/vendor/github.com/containerd/containerd/content/local/readerat.go +++ b/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -18,6 +18,7 @@ package local import ( "fmt" + "io" "os" "github.com/containerd/containerd/content" @@ -65,3 +66,7 @@ func (ra sizeReaderAt) Size() int64 { func (ra sizeReaderAt) Close() error { return ra.fp.Close() } + +func (ra sizeReaderAt) Reader() io.Reader { + return io.LimitReader(ra.fp, ra.size) +} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 1b01790a5d..baae3565bb 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -262,7 +262,7 @@ func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) err return nil } - dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) + dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path)) if err := dgst.Validate(); err != nil { // log error but don't report log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") @@ -629,14 +629,14 @@ func (s *store) blobPath(dgst digest.Digest) (string, error) { return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) } - return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil + return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil } func (s *store) ingestRoot(ref string) string { // we take a digest of the ref to keep the ingest paths constant length. // Note that this is not the current or potential digest of incoming content. 
dgst := digest.FromString(ref) - return filepath.Join(s.root, "ingest", dgst.Hex()) + return filepath.Join(s.root, "ingest", dgst.Encoded()) } // ingestPaths are returned. The paths are the following: diff --git a/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/content/local/store_bsd.go index 42fddd3411..7dcc192327 100644 --- a/vendor/github.com/containerd/containerd/content/local/store_bsd.go +++ b/vendor/github.com/containerd/containerd/content/local/store_bsd.go @@ -1,5 +1,4 @@ //go:build darwin || freebsd || netbsd -// +build darwin freebsd netbsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go index 2b58b617b9..45dfa9997e 100644 --- a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go +++ b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go @@ -1,5 +1,4 @@ //go:build openbsd -// +build openbsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go index efa2eb9430..cb01c91c7f 100644 --- a/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -1,5 +1,4 @@ //go:build linux || solaris -// +build linux solaris /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/content/local/test_helper.go b/vendor/github.com/containerd/containerd/content/local/test_helper.go new file mode 100644 index 0000000000..42de01cae8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/test_helper.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "context" + "testing" + + "github.com/containerd/containerd/content" +) + +func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { + tmpdir := t.TempDir() + + cs, err := NewStore(tmpdir) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + return ctx, tmpdir, cs, func() { + cancel() + } +} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go index 2947a7c821..893e3b0f23 100644 --- a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go +++ b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go @@ -36,9 +36,9 @@ func (ra *remoteReaderAt) Size() int64 { func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) { rr := &contentapi.ReadContentRequest{ - Digest: ra.digest, + Digest: ra.digest.String(), Offset: off, - Size_: int64(len(p)), + Size: int64(len(p)), } // we need a child context with cancel, or the eventually called // grpc.NewStream will leak the goroutine until the whole thing is cleared. diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_store.go b/vendor/github.com/containerd/containerd/content/proxy/content_store.go index 217b746516..8e7fb42cf0 100644 --- a/vendor/github.com/containerd/containerd/content/proxy/content_store.go +++ b/vendor/github.com/containerd/containerd/content/proxy/content_store.go @@ -23,7 +23,8 @@ import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" - protobuftypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + protobuftypes "github.com/containerd/containerd/protobuf/types" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -42,7 +43,7 @@ func NewContentStore(client contentapi.ContentClient) content.Store { func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{ - Digest: dgst, + Digest: dgst.String(), }) if err != nil { return content.Info{}, errdefs.FromGRPC(err) @@ -81,7 +82,7 @@ func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, fil func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error { if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{ - Digest: dgst, + Digest: dgst.String(), }); err != nil { return errdefs.FromGRPC(err) } @@ -115,17 +116,17 @@ func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.S status := resp.Status return content.Status{ Ref: status.Ref, - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + StartedAt: protobuf.FromTimestamp(status.StartedAt), + UpdatedAt: protobuf.FromTimestamp(status.UpdatedAt), Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: digest.Digest(status.Expected), }, nil } func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{ - Info: infoToGRPC(info), + Info: infoToGRPC(&info), UpdateMask: &protobuftypes.FieldMask{ Paths: fieldpaths, }, @@ -148,11 +149,11 @@ func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...strin for _, status := 
range resp.Statuses { statuses = append(statuses, content.Status{ Ref: status.Ref, - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + StartedAt: protobuf.FromTimestamp(status.StartedAt), + UpdatedAt: protobuf.FromTimestamp(status.UpdatedAt), Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: digest.Digest(status.Expected), }) } @@ -197,10 +198,10 @@ func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size in } if err := wrclient.Send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionStat, + Action: contentapi.WriteAction_STAT, Ref: ref, Total: size, - Expected: expected, + Expected: expected.String(), }); err != nil { return nil, 0, err } @@ -213,22 +214,22 @@ func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size in return wrclient, resp.Offset, nil } -func infoToGRPC(info content.Info) contentapi.Info { - return contentapi.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, +func infoToGRPC(info *content.Info) *contentapi.Info { + return &contentapi.Info{ + Digest: info.Digest.String(), + Size: info.Size, + CreatedAt: protobuf.ToTimestamp(info.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(info.UpdatedAt), Labels: info.Labels, } } -func infoFromGRPC(info contentapi.Info) content.Info { +func infoFromGRPC(info *contentapi.Info) content.Info { return content.Info{ - Digest: info.Digest, - Size: info.Size_, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, + Digest: digest.Digest(info.Digest), + Size: info.Size, + CreatedAt: protobuf.FromTimestamp(info.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(info.UpdatedAt), Labels: info.Labels, } } diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go index ffc0f50ea1..185115b0a4 100644 --- a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go +++ b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go @@ -24,6 +24,7 @@ import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/protobuf" digest "github.com/opencontainers/go-digest" ) @@ -45,7 +46,7 @@ func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.W if err == nil { // try to keep these in sync if resp.Digest != "" { - rw.digest = resp.Digest + rw.digest = digest.Digest(resp.Digest) } } @@ -54,7 +55,7 @@ func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.W func (rw *remoteWriter) Status() (content.Status, error) { resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionStat, + Action: contentapi.WriteAction_STAT, }) if err != nil { return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err)) @@ -64,8 +65,8 @@ func (rw *remoteWriter) Status() (content.Status, error) { Ref: rw.ref, Offset: resp.Offset, Total: resp.Total, - StartedAt: resp.StartedAt, - UpdatedAt: resp.UpdatedAt, + StartedAt: protobuf.FromTimestamp(resp.StartedAt), + UpdatedAt: protobuf.FromTimestamp(resp.UpdatedAt), }, nil } @@ -77,7 +78,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) { offset := rw.offset resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionWrite, + Action: contentapi.WriteAction_WRITE, Offset: offset, 
Data: p, }) @@ -92,7 +93,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) { rw.offset += int64(n) if resp.Digest != "" { - rw.digest = resp.Digest + rw.digest = digest.Digest(resp.Digest) } return } @@ -112,10 +113,10 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest. } } resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionCommit, + Action: contentapi.WriteAction_COMMIT, Total: size, Offset: rw.offset, - Expected: expected, + Expected: expected.String(), Labels: base.Labels, }) if err != nil { @@ -126,11 +127,12 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest. return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size) } - if expected != "" && resp.Digest != expected { + actual := digest.Digest(resp.Digest) + if expected != "" && actual != expected { return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected) } - rw.digest = resp.Digest + rw.digest = actual rw.offset = resp.Offset return nil } diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 8e2619a381..c79f9ba7df 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 28012b1f02..0b1d44ed5b 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -17,14 +17,9 @@ package containerd import ( - "context" - diffapi "github.com/containerd/containerd/api/services/diff/v1" - "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/diff" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/containerd/containerd/diff/proxy" ) // DiffService handles the computation and application of diffs @@ -36,84 +31,5 @@ type DiffService interface { // NewDiffServiceFromClient returns a new diff service which communicates // over a GRPC connection. 
func NewDiffServiceFromClient(client diffapi.DiffClient) DiffService { - return &diffRemote{ - client: client, - } -} - -type diffRemote struct { - client diffapi.DiffClient -} - -func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { - var config diff.ApplyConfig - for _, opt := range opts { - if err := opt(ctx, desc, &config); err != nil { - return ocispec.Descriptor{}, err - } - } - req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(desc), - Mounts: fromMounts(mounts), - Payloads: config.ProcessorPayloads, - } - resp, err := r.client.Apply(ctx, req) - if err != nil { - return ocispec.Descriptor{}, errdefs.FromGRPC(err) - } - return toDescriptor(resp.Applied), nil -} - -func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { - var config diff.Config - for _, opt := range opts { - if err := opt(&config); err != nil { - return ocispec.Descriptor{}, err - } - } - req := &diffapi.DiffRequest{ - Left: fromMounts(a), - Right: fromMounts(b), - MediaType: config.MediaType, - Ref: config.Reference, - Labels: config.Labels, - } - resp, err := r.client.Diff(ctx, req) - if err != nil { - return ocispec.Descriptor{}, errdefs.FromGRPC(err) - } - return toDescriptor(resp.Diff), nil -} - -func toDescriptor(d *types.Descriptor) ocispec.Descriptor { - if d == nil { - return ocispec.Descriptor{} - } - return ocispec.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size: d.Size_, - Annotations: d.Annotations, - } -} - -func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { - return &types.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size_: d.Size, - Annotations: d.Annotations, - } -} - -func fromMounts(mounts []mount.Mount) []*types.Mount { - apiMounts := make([]*types.Mount, len(mounts)) - for i, m := range mounts { - apiMounts[i] = &types.Mount{ - Type: m.Type, - Source: m.Source, - Options: m.Options, - } - } - return apiMounts + return proxy.NewDiffApplier(client).(DiffService) } diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index 235d6377c4..56c7e66940 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -19,9 +19,10 @@ package diff import ( "context" "io" + "time" "github.com/containerd/containerd/mount" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -44,6 +45,9 @@ type Config struct { // the MediaType of the target diff content to the compressor. // When using this config, MediaType must be specified as well. Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) + + // SourceDateEpoch specifies the SOURCE_DATE_EPOCH without touching the env vars. 
+ SourceDateEpoch *time.Time } // Opt is used to configure a diff operation @@ -62,7 +66,7 @@ type Comparer interface { // ApplyConfig is used to hold parameters needed for a apply operation type ApplyConfig struct { // ProcessorPayloads specifies the payload sent to various processors - ProcessorPayloads map[string]*types.Any + ProcessorPayloads map[string]typeurl.Any } // ApplyOpt is used to configure an Apply operation @@ -114,9 +118,18 @@ func WithLabels(labels map[string]string) Opt { } // WithPayloads sets the apply processor payloads to the config -func WithPayloads(payloads map[string]*types.Any) ApplyOpt { +func WithPayloads(payloads map[string]typeurl.Any) ApplyOpt { return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { c.ProcessorPayloads = payloads return nil } } + +// WithSourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility. +// See also https://reproducible-builds.org/docs/source-date-epoch/ . +func WithSourceDateEpoch(tm *time.Time) Opt { + return func(c *Config) error { + c.SourceDateEpoch = tm + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/proxy/differ.go b/vendor/github.com/containerd/containerd/diff/proxy/differ.go new file mode 100644 index 0000000000..8ed8bdf4db --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/proxy/differ.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + + diffapi "github.com/containerd/containerd/api/services/diff/v1" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/epoch" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +// NewDiffApplier returns a new comparer and applier which communicates +// over a GRPC connection. 
+func NewDiffApplier(client diffapi.DiffClient) interface{} { + return &diffRemote{ + client: client, + } +} + +type diffRemote struct { + client diffapi.DiffClient +} + +func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { + var config diff.ApplyConfig + for _, opt := range opts { + if err := opt(ctx, desc, &config); err != nil { + return ocispec.Descriptor{}, err + } + } + + payloads := make(map[string]*ptypes.Any) + for k, v := range config.ProcessorPayloads { + payloads[k] = protobuf.FromAny(v) + } + + req := &diffapi.ApplyRequest{ + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: payloads, + } + resp, err := r.client.Apply(ctx, req) + if err != nil { + return ocispec.Descriptor{}, errdefs.FromGRPC(err) + } + return toDescriptor(resp.Applied), nil +} + +func (r *diffRemote) Compare(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return ocispec.Descriptor{}, err + } + } + if tm := epoch.FromContext(ctx); tm != nil && config.SourceDateEpoch == nil { + config.SourceDateEpoch = tm + } + var sourceDateEpoch *timestamppb.Timestamp + if config.SourceDateEpoch != nil { + sourceDateEpoch = timestamppb.New(*config.SourceDateEpoch) + } + req := &diffapi.DiffRequest{ + Left: fromMounts(a), + Right: fromMounts(b), + MediaType: config.MediaType, + Ref: config.Reference, + Labels: config.Labels, + SourceDateEpoch: sourceDateEpoch, + } + resp, err := r.client.Diff(ctx, req) + if err != nil { + return ocispec.Descriptor{}, errdefs.FromGRPC(err) + } + return toDescriptor(resp.Diff), nil +} + +func toDescriptor(d *types.Descriptor) ocispec.Descriptor { + return ocispec.Descriptor{ + MediaType: d.MediaType, + Digest: digest.Digest(d.Digest), + Size: d.Size, + Annotations: d.Annotations, + } +} + +func fromDescriptor(d ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ + MediaType: d.MediaType, + Digest: d.Digest.String(), + Size: d.Size, + Annotations: d.Annotations, + } +} + +func fromMounts(mounts []mount.Mount) []*types.Mount { + apiMounts := make([]*types.Mount, len(mounts)) + for i, m := range mounts { + apiMounts[i] = &types.Mount{ + Type: m.Type, + Source: m.Source, + Target: m.Target, + Options: m.Options, + } + } + return apiMounts +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go index 44e35fcc81..b80e6d1a5c 100644 --- a/vendor/github.com/containerd/containerd/diff/stream.go +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -24,7 +24,7 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/images" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -46,7 +46,7 @@ func RegisterProcessor(handler Handler) { } // GetProcessor returns the processor for a media-type -func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { // reverse this list so that user configured handlers come up first for i := len(handlers) - 1; i >= 0; i-- { processor, ok := handlers[i](ctx, stream.MediaType()) @@ -71,7 +71,7 @@ func 
StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { } // StreamProcessorInit returns the initialized stream processor -type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) // RawProcessor provides access to direct fd for processing type RawProcessor interface { @@ -93,7 +93,7 @@ func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorIn return nil, false } if compressed != "" { - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { ds, err := compression.DecompressStream(stream) if err != nil { return nil, err @@ -104,7 +104,7 @@ func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorIn }, nil }, true } - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { return &stdProcessor{ rc: stream, }, nil @@ -179,7 +179,7 @@ func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string } return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { if _, ok := set[mediaType]; ok { - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { payload := payloads[id] return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, env, payload) }, true diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go index 6622c331ee..82fc31b402 100644 --- a/vendor/github.com/containerd/containerd/diff/stream_unix.go +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -28,20 +27,22 @@ import ( "os" "sync" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/typeurl/v2" exec "golang.org/x/sys/execabs" ) // NewBinaryProcessor returns a binary processor for use with processing content streams -func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload *types.Any) (StreamProcessor, error) { +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload typeurl.Any) (StreamProcessor, error) { cmd := exec.CommandContext(ctx, name, args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, env...) 
var payloadC io.Closer if payload != nil { - data, err := proto.Marshal(payload) + pb := protobuf.FromAny(payload) + data, err := proto.Marshal(pb) if err != nil { return nil, err } @@ -87,6 +88,7 @@ func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProce r: r, mt: rmt, stderr: stderr, + done: make(chan struct{}), } go p.wait() @@ -109,6 +111,11 @@ type binaryProcessor struct { mu sync.Mutex err error + + // There is a race condition between waiting on c.cmd.Wait() and setting c.err within + // c.wait(), and reading that value from c.Err(). + // Use done to wait for the returned error to be captured and set. + done chan struct{} } func (c *binaryProcessor) Err() error { @@ -125,6 +132,16 @@ func (c *binaryProcessor) wait() { c.mu.Unlock() } } + close(c.done) +} + +func (c *binaryProcessor) Wait(ctx context.Context) error { + select { + case <-c.done: + return c.Err() + case <-ctx.Done(): + return ctx.Err() + } } func (c *binaryProcessor) File() *os.File { diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go index c0bf03b94a..e6026f4e98 100644 --- a/vendor/github.com/containerd/containerd/diff/stream_windows.go +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -27,8 +27,9 @@ import ( "sync" winio "github.com/Microsoft/go-winio" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/typeurl/v2" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) @@ -36,13 +37,14 @@ import ( const processorPipe = "STREAM_PROCESSOR_PIPE" // NewBinaryProcessor returns a binary processor for use with processing content streams -func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload *types.Any) (StreamProcessor, error) { +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload typeurl.Any) (StreamProcessor, error) { cmd := exec.CommandContext(ctx, name, args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, env...) if payload != nil { - data, err := proto.Marshal(payload) + pb := protobuf.FromAny(payload) + data, err := proto.Marshal(pb) if err != nil { return nil, err } @@ -96,6 +98,7 @@ func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProce r: r, mt: rmt, stderr: stderr, + done: make(chan struct{}), } go p.wait() @@ -115,6 +118,11 @@ type binaryProcessor struct { mu sync.Mutex err error + + // There is a race condition between waiting on c.cmd.Wait() and setting c.err within + // c.wait(), and reading that value from c.Err(). + // Use done to wait for the returned error to be captured and set. 
+ done chan struct{} } func (c *binaryProcessor) Err() error { @@ -131,6 +139,16 @@ func (c *binaryProcessor) wait() { c.mu.Unlock() } } + close(c.done) +} + +func (c *binaryProcessor) Wait(ctx context.Context) error { + select { + case <-c.done: + return c.Err() + case <-ctx.Done(): + return ctx.Err() + } } func (c *binaryProcessor) File() *os.File { diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go index ad20788617..34a5797e74 100644 --- a/vendor/github.com/containerd/containerd/diff/walking/differ.go +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -30,8 +30,10 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/epoch" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -41,7 +43,6 @@ type walkingDiff struct { } var emptyDesc = ocispec.Descriptor{} -var uncompressed = "containerd.io/uncompressed" // NewWalkingDiff is a generic implementation of diff.Comparer. The diff is // calculated by mounting both the upper and lower mount sets and walking the @@ -64,6 +65,14 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o return emptyDesc, err } } + if tm := epoch.FromContext(ctx); tm != nil && config.SourceDateEpoch == nil { + config.SourceDateEpoch = tm + } + + var writeDiffOpts []archive.WriteDiffOpt + if config.SourceDateEpoch != nil { + writeDiffOpts = append(writeDiffOpts, archive.WithSourceDateEpoch(config.SourceDateEpoch)) + } var isCompressed bool if config.Compressor != nil { @@ -136,7 +145,7 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o return fmt.Errorf("failed to get compressed stream: %w", errOpen) } } - errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot, writeDiffOpts...) 
compressed.Close() if errOpen != nil { return fmt.Errorf("failed to write compressed diff: %w", errOpen) @@ -145,9 +154,9 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if config.Labels == nil { config.Labels = map[string]string{} } - config.Labels[uncompressed] = dgstr.Digest().String() + config.Labels[labels.LabelUncompressed] = dgstr.Digest().String() } else { - if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); errOpen != nil { + if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot, writeDiffOpts...); errOpen != nil { return fmt.Errorf("failed to write diff: %w", errOpen) } } @@ -172,10 +181,10 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if info.Labels == nil { info.Labels = make(map[string]string) } - // Set uncompressed label if digest already existed without label - if _, ok := info.Labels[uncompressed]; !ok { - info.Labels[uncompressed] = config.Labels[uncompressed] - if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + // Set "containerd.io/uncompressed" label if digest already existed without label + if _, ok := info.Labels[labels.LabelUncompressed]; !ok { + info.Labels[labels.LabelUncompressed] = config.Labels[labels.LabelUncompressed] + if _, err := s.store.Update(ctx, info, "labels."+labels.LabelUncompressed); err != nil { return fmt.Errorf("error setting uncompressed label: %w", err) } } diff --git a/vendor/github.com/containerd/containerd/events.go b/vendor/github.com/containerd/containerd/events.go index 3577b7c3a9..32d2dfc315 100644 --- a/vendor/github.com/containerd/containerd/events.go +++ b/vendor/github.com/containerd/containerd/events.go @@ -22,7 +22,8 @@ import ( eventsapi "github.com/containerd/containerd/api/services/events/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" - "github.com/containerd/typeurl" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/typeurl/v2" ) // EventService handles the publish, forward and subscribe of events. 
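
The events client above drops gogo's *types.Any in favour of the interface-based typeurl.Any, converting to the concrete protobuf Any with protobuf.FromAny at the gRPC boundary. A minimal sketch of that round trip follows; buildMetadata and its type URL are placeholders, and Register/UnmarshalAny are assumed to be the usual companions in typeurl/v2:

package main

import (
	"fmt"

	"github.com/containerd/containerd/protobuf"
	"github.com/containerd/typeurl/v2"
)

// buildMetadata is a stand-in payload type for illustration only.
type buildMetadata struct {
	Ref string
}

func main() {
	// Plain Go types must be registered with a type URL before they
	// can be packed into a typeurl.Any.
	typeurl.Register(&buildMetadata{}, "example.org/buildMetadata")

	any, err := typeurl.MarshalAny(&buildMetadata{Ref: "sha256:abc"})
	if err != nil {
		panic(err)
	}

	// protobuf.FromAny adapts the interface-based typeurl.Any to the
	// *anypb.Any expected by the regenerated API types.
	pb := protobuf.FromAny(any)
	fmt.Println(pb.GetTypeUrl())

	// And back again: unpack into the registered Go type.
	v, err := typeurl.UnmarshalAny(any)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(*buildMetadata).Ref)
}
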
@@ -51,7 +52,7 @@ func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Ev } req := &eventsapi.PublishRequest{ Topic: topic, - Event: any, + Event: protobuf.FromAny(any), } if _, err := e.client.Publish(ctx, req); err != nil { return errdefs.FromGRPC(err) @@ -62,10 +63,10 @@ func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Ev func (e *eventRemote) Forward(ctx context.Context, envelope *events.Envelope) error { req := &eventsapi.ForwardRequest{ Envelope: &eventsapi.Envelope{ - Timestamp: envelope.Timestamp, + Timestamp: protobuf.ToTimestamp(envelope.Timestamp), Namespace: envelope.Namespace, Topic: envelope.Topic, - Event: envelope.Event, + Event: protobuf.FromAny(envelope.Event), }, } if _, err := e.client.Forward(ctx, req); err != nil { @@ -104,7 +105,7 @@ func (e *eventRemote) Subscribe(ctx context.Context, filters ...string) (ch <-ch select { case evq <- &events.Envelope{ - Timestamp: ev.Timestamp, + Timestamp: protobuf.FromTimestamp(ev.Timestamp), Namespace: ev.Namespace, Topic: ev.Topic, Event: ev.Event, diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go index b7eb86f1eb..ff5642301f 100644 --- a/vendor/github.com/containerd/containerd/events/events.go +++ b/vendor/github.com/containerd/containerd/events/events.go @@ -20,8 +20,7 @@ import ( "context" "time" - "github.com/containerd/typeurl" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ) // Envelope provides the packaging for an event. @@ -29,7 +28,7 @@ type Envelope struct { Timestamp time.Time Namespace string Topic string - Event *types.Any + Event typeurl.Any } // Field returns the value for the given fieldpath as a string, if defined. diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index a1f385d7ab..e18377c11a 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -28,10 +28,8 @@ import ( "github.com/containerd/containerd/identifiers" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" goevents "github.com/docker/go-events" - "github.com/gogo/protobuf/types" - "github.com/sirupsen/logrus" ) // Exchange broadcasts events @@ -60,10 +58,10 @@ func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err } defer func() { - logger := log.G(ctx).WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(log.Fields{ "topic": envelope.Topic, "ns": envelope.Namespace, - "type": envelope.Event.TypeUrl, + "type": envelope.Event.GetTypeUrl(), }) if err != nil { @@ -82,7 +80,6 @@ func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { var ( namespace string - encoded *types.Any envelope events.Envelope ) @@ -94,7 +91,7 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event return fmt.Errorf("envelope topic %q: %w", topic, err) } - encoded, err = typeurl.MarshalAny(event) + encoded, err := typeurl.MarshalAny(event) if err != nil { return err } @@ -105,10 +102,10 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event envelope.Event = encoded defer func() { - logger := 
log.G(ctx).WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(log.Fields{ "topic": envelope.Topic, "ns": envelope.Namespace, - "type": envelope.Event.TypeUrl, + "type": envelope.Event.GetTypeUrl(), }) if err != nil { diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 784df5dd95..40dc3ff6cb 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -28,6 +28,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" @@ -64,6 +65,8 @@ type Image interface { Metadata() images.Image // Platform returns the platform match comparer. Can be nil. Platform() platforms.MatchComparer + // Spec returns the OCI image spec for a given image. + Spec(ctx context.Context) (ocispec.Image, error) } type usageOptions struct { @@ -279,6 +282,26 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } +func (i *image) Spec(ctx context.Context) (ocispec.Image, error) { + var ociImage ocispec.Image + + desc, err := i.Config(ctx) + if err != nil { + return ociImage, fmt.Errorf("get image config descriptor: %w", err) + } + + blob, err := content.ReadBlob(ctx, i.ContentStore(), desc) + if err != nil { + return ociImage, fmt.Errorf("read image config from content store: %w", err) + } + + if err := json.Unmarshal(blob, &ociImage); err != nil { + return ociImage, fmt.Errorf("unmarshal image config %s: %w", blob, err) + } + + return ociImage, nil +} + // UnpackConfig provides configuration for the unpack of an image type UnpackConfig struct { // ApplyOpts for applying a diff to a snapshotter @@ -370,10 +393,10 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...Unpa cinfo := content.Info{ Digest: layer.Blob.Digest, Labels: map[string]string{ - "containerd.io/uncompressed": layer.Diff.Digest.String(), + labels.LabelUncompressed: layer.Diff.Digest.String(), }, } - if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + if _, err := cs.Update(ctx, cinfo, "labels."+labels.LabelUncompressed); err != nil { return err } } diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go index a9702822af..524a7a6727 100644 --- a/vendor/github.com/containerd/containerd/image_store.go +++ b/vendor/github.com/containerd/containerd/image_store.go @@ -23,8 +23,12 @@ import ( "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/pkg/epoch" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/protobuf/types/known/timestamppb" ) type remoteImages struct { @@ -61,14 +65,18 @@ func (s *remoteImages) List(ctx context.Context, filters ...string) ([]images.Im } func (s *remoteImages) Create(ctx context.Context, image images.Image) (images.Image, error) { - created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{ + req := &imagesapi.CreateImageRequest{ Image: 
imageToProto(&image), - }) + } + if tm := epoch.FromContext(ctx); tm != nil { + req.SourceDateEpoch = timestamppb.New(*tm) + } + created, err := s.client.Create(ctx, req) if err != nil { return images.Image{}, errdefs.FromGRPC(err) } - return imageFromProto(&created.Image), nil + return imageFromProto(created.Image), nil } func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { @@ -78,16 +86,19 @@ func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpath Paths: fieldpaths, } } - - updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{ + req := &imagesapi.UpdateImageRequest{ Image: imageToProto(&image), UpdateMask: updateMask, - }) + } + if tm := epoch.FromContext(ctx); tm != nil { + req.SourceDateEpoch = timestamppb.New(*tm) + } + updated, err := s.client.Update(ctx, req) if err != nil { return images.Image{}, errdefs.FromGRPC(err) } - return imageFromProto(&updated.Image), nil + return imageFromProto(updated.Image), nil } func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { @@ -105,13 +116,13 @@ func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.D return errdefs.FromGRPC(err) } -func imageToProto(image *images.Image) imagesapi.Image { - return imagesapi.Image{ +func imageToProto(image *images.Image) *imagesapi.Image { + return &imagesapi.Image{ Name: image.Name, Labels: image.Labels, Target: descToProto(&image.Target), - CreatedAt: image.CreatedAt, - UpdatedAt: image.UpdatedAt, + CreatedAt: protobuf.ToTimestamp(image.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(image.UpdatedAt), } } @@ -119,18 +130,18 @@ func imageFromProto(imagepb *imagesapi.Image) images.Image { return images.Image{ Name: imagepb.Name, Labels: imagepb.Labels, - Target: descFromProto(&imagepb.Target), - CreatedAt: imagepb.CreatedAt, - UpdatedAt: imagepb.UpdatedAt, + Target: descFromProto(imagepb.Target), + CreatedAt: protobuf.FromTimestamp(imagepb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(imagepb.UpdatedAt), } } -func imagesFromProto(imagespb []imagesapi.Image) []images.Image { +func imagesFromProto(imagespb []*imagesapi.Image) []images.Image { var images []images.Image for _, image := range imagespb { image := image - images = append(images, imageFromProto(&image)) + images = append(images, imageFromProto(image)) } return images @@ -139,17 +150,17 @@ func imagesFromProto(imagespb []imagesapi.Image) []images.Image { func descFromProto(desc *types.Descriptor) ocispec.Descriptor { return ocispec.Descriptor{ MediaType: desc.MediaType, - Size: desc.Size_, - Digest: desc.Digest, + Size: desc.Size, + Digest: digest.Digest(desc.Digest), Annotations: desc.Annotations, } } -func descToProto(desc *ocispec.Descriptor) types.Descriptor { - return types.Descriptor{ +func descToProto(desc *ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ MediaType: desc.MediaType, - Size_: desc.Size, - Digest: desc.Digest, + Size: desc.Size, + Digest: desc.Digest.String(), Annotations: desc.Annotations, } } diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 6943a7f8bc..b14302261d 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -89,6 +89,18 @@ func WithImage(is images.Store, name string) ExportOpt { } } +// WithImages adds multiples images to the 
exported archive. +func WithImages(imgs []images.Image) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + for _, img := range imgs { + img.Target.Annotations = addNameAnnotation(img.Name, img.Target.Annotations) + o.manifests = append(o.manifests, img.Target) + } + + return nil + } +} + // WithManifest adds a manifest to the exported archive. // When names are given they will be set on the manifest in the // exported archive, creating an index record for each name. diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index ea7891f217..3ca88091cf 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -31,6 +31,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" @@ -264,11 +265,11 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string } layers[i] = desc descs[desc.Digest] = &layers[i] - filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + filters = append(filters, fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String())) } err := store.Walk(ctx, func(info content.Info) error { - dgst, ok := info.Labels["containerd.io/uncompressed"] + dgst, ok := info.Labels[labels.LabelUncompressed] if ok { desc := descs[digest.Digest(dgst)] if desc != nil { @@ -308,7 +309,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string } ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) labels := map[string]string{ - "containerd.io/uncompressed": desc.Digest.String(), + labels.LabelUncompressed: desc.Digest.String(), } layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index ba19b111f1..8a030fbfa5 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -41,6 +41,9 @@ func AddRefPrefix(image string) func(string) string { // a full reference. 
func refTranslator(image string, checkPrefix bool) func(string) string { return func(ref string) string { + if image == "" { + return "" + } // Check if ref is full reference if strings.ContainsAny(ref, "/:@") { // If not prefixed, don't include image diff --git a/vendor/github.com/containerd/containerd/images/converter/default.go b/vendor/github.com/containerd/containerd/images/converter/default.go index 65224bd81a..c67617e4cc 100644 --- a/vendor/github.com/containerd/containerd/images/converter/default.go +++ b/vendor/github.com/containerd/containerd/images/converter/default.go @@ -410,6 +410,7 @@ func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc oci return nil, err } if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { + w.Close() return nil, err } if err := w.Close(); err != nil { diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index d45afe482c..2d2e36a9a3 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -138,7 +138,7 @@ type platformManifest struct { // TODO(stevvooe): This violates the current platform agnostic approach to this // package by returning a specific manifest type. We'll need to refactor this // to return a manifest descriptor or decide that we want to bring the API in -// this direction because this abstraction is not needed.` +// this direction because this abstraction is not needed. func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( limit = 1 @@ -311,7 +311,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) } - // TODO(stevvooe): It is possible that referenced conponents could have + // TODO(stevvooe): It is possible that referenced components could have // children, but this is rare. For now, we ignore this and only verify // that manifest components are present. required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) 
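
The image.go hunk above adds a Spec method to the client's Image interface, resolving the config descriptor and unmarshalling the blob into an ocispec.Image. A hedged usage sketch, with the socket path and image reference as placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	img, err := client.GetImage(ctx, "docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}

	// Spec reads the image config from the content store and unmarshals it.
	spec, err := img.Spec(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Architecture, spec.OS, len(spec.RootFS.DiffIDs))
}
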
diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 671e160e15..067963babb 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -38,7 +38,9 @@ const ( MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Checkpoint/Restore Media Types + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" @@ -47,9 +49,12 @@ const ( MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" - // Legacy Docker schema1 manifest + + // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // Encypted media types + + // Encrypted media types + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" ) @@ -93,16 +98,23 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) { // parseMediaTypes splits the media type into the base type and // an array of sorted extensions -func parseMediaTypes(mt string) (string, []string) { +func parseMediaTypes(mt string) (mediaType string, suffixes []string) { if mt == "" { return "", []string{} } + mediaType, ext, ok := strings.Cut(mt, "+") + if !ok { + return mediaType, []string{} + } - s := strings.Split(mt, "+") - ext := s[1:] - sort.Strings(ext) - - return s[0], ext + // Splitting the extensions following the mediatype "(+)gzip+encrypted". + // We expect this to be a limited list, so add an arbitrary limit (50). + // + // Note that DiffCompression is only using the last element, so perhaps we + // should split on the last "+" only. + suffixes = strings.SplitN(ext, "+", 50) + sort.Strings(suffixes) + return mediaType, suffixes } // IsNonDistributable returns true if the media type is non-distributable. 
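
The parseMediaTypes rewrite above replaces strings.Split with strings.Cut plus a capped SplitN. Since that function is unexported, the following standalone sketch only reimplements the same splitting behaviour for illustration:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// splitMediaType mirrors the vendored parseMediaTypes: the base type is
// everything before the first '+', and the remaining '+'-separated
// suffixes are returned sorted.
func splitMediaType(mt string) (string, []string) {
	base, ext, ok := strings.Cut(mt, "+")
	if !ok {
		return base, []string{}
	}
	suffixes := strings.SplitN(ext, "+", 50) // same arbitrary cap as the vendored code
	sort.Strings(suffixes)
	return base, suffixes
}

func main() {
	base, suffixes := splitMediaType("application/vnd.oci.image.layer.v1.tar+gzip+encrypted")
	fmt.Println(base)     // application/vnd.oci.image.layer.v1.tar
	fmt.Println(suffixes) // [encrypted gzip]
}
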
@@ -118,8 +130,7 @@ func IsLayerType(mt string) bool { } // Parse Docker media types, strip off any + suffixes first - base, _ := parseMediaTypes(mt) - switch base { + switch base, _ := parseMediaTypes(mt); base { case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: return true diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 8936d88eff..ca55920dc4 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -38,6 +38,7 @@ type importOpts struct { allPlatforms bool platformMatcher platforms.MatchComparer compress bool + discardLayers bool } // ImportOpt allows the caller to specify import specific options @@ -105,6 +106,15 @@ func WithImportCompression() ImportOpt { } } +// WithDiscardUnpackedLayers allows the garbage collector to clean up +// layers from content store after unpacking. +func WithDiscardUnpackedLayers() ImportOpt { + return func(c *importOpts) error { + c.discardLayers = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. // Note that unreferenced blobs may be imported to the content store as well. @@ -195,7 +205,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } handler = images.FilterPlatforms(handler, platformMatcher) - handler = images.SetChildrenLabels(cs, handler) + if iopts.discardLayers { + handler = images.SetChildrenMappedLabels(cs, handler, images.ChildGCLabelsFilterLayers) + } else { + handler = images.SetChildrenLabels(cs, handler) + } if err := images.WalkNotEmpty(ctx, handler, index); err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/install.go b/vendor/github.com/containerd/containerd/install.go index 16cff08d24..a307960b8c 100644 --- a/vendor/github.com/containerd/containerd/install.go +++ b/vendor/github.com/containerd/containerd/install.go @@ -32,7 +32,8 @@ import ( "github.com/containerd/containerd/images" ) -// Install a binary image into the opt service +// Install a binary image into the opt service. +// More info: https://github.com/containerd/containerd/blob/main/docs/managed-opt.md. 
func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) error { var config InstallConfig for _, o := range opts { @@ -70,7 +71,8 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) ra.Close() return err } - if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) { + + filter := archive.WithFilter(func(hdr *tar.Header) (bool, error) { d := filepath.Dir(hdr.Name) result := d == binDir @@ -87,7 +89,15 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) } } return result, nil - })); err != nil { + }) + + opts := []archive.ApplyOpt{filter} + + if runtime.GOOS == "windows" { + opts = append(opts, archive.WithNoSameOwner()) + } + + if _, err := archive.Apply(ctx, path, r, opts...); err != nil { r.Close() ra.Close() return err diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go index d76ff2cf9c..0f9bab5c5d 100644 --- a/vendor/github.com/containerd/containerd/labels/labels.go +++ b/vendor/github.com/containerd/containerd/labels/labels.go @@ -19,3 +19,11 @@ package labels // LabelUncompressed is added to compressed layer contents. // The value is digest of the uncompressed content. const LabelUncompressed = "containerd.io/uncompressed" + +// LabelSharedNamespace is added to a namespace to allow that namespaces +// contents to be shared. +const LabelSharedNamespace = "containerd.io/namespace.shareable" + +// LabelDistributionSource is added to content to indicate its origin. +// e.g., "containerd.io/distribution.source.docker.io=library/redis" +const LabelDistributionSource = "containerd.io/distribution.source" diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go index 1fd527adb3..f83b5dde29 100644 --- a/vendor/github.com/containerd/containerd/labels/validate.go +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -24,15 +24,18 @@ import ( const ( maxSize = 4096 + // maximum length of key portion of error message if len of key + len of value > maxSize + keyMaxLen = 64 ) // Validate a label's key and value are under 4096 bytes func Validate(k, v string) error { - if (len(k) + len(v)) > maxSize { - if len(k) > 10 { - k = k[:10] + total := len(k) + len(v) + if total > maxSize { + if len(k) > keyMaxLen { + k = k[:keyMaxLen] } - return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument) + return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/leases/proxy/manager.go b/vendor/github.com/containerd/containerd/leases/proxy/manager.go index 96cd5e653b..ae42d8eb10 100644 --- a/vendor/github.com/containerd/containerd/leases/proxy/manager.go +++ b/vendor/github.com/containerd/containerd/leases/proxy/manager.go @@ -22,6 +22,7 @@ import ( leasesapi "github.com/containerd/containerd/api/services/leases/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/protobuf" ) type proxyManager struct { @@ -53,7 +54,7 @@ func (pm *proxyManager) Create(ctx context.Context, opts ...leases.Opt) (leases. 
return leases.Lease{ ID: resp.Lease.ID, - CreatedAt: resp.Lease.CreatedAt, + CreatedAt: protobuf.FromTimestamp(resp.Lease.CreatedAt), Labels: resp.Lease.Labels, }, nil } @@ -84,7 +85,7 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L for i := range resp.Leases { l[i] = leases.Lease{ ID: resp.Leases[i].ID, - CreatedAt: resp.Leases[i].CreatedAt, + CreatedAt: protobuf.FromTimestamp(resp.Leases[i].CreatedAt), Labels: resp.Leases[i].Labels, } } @@ -95,7 +96,7 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { _, err := pm.client.AddResource(ctx, &leasesapi.AddResourceRequest{ ID: lease.ID, - Resource: leasesapi.Resource{ + Resource: &leasesapi.Resource{ ID: r.ID, Type: r.Type, }, @@ -106,7 +107,7 @@ func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r l func (pm *proxyManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { _, err := pm.client.DeleteResource(ctx, &leasesapi.DeleteResourceRequest{ ID: lease.ID, - Resource: leasesapi.Resource{ + Resource: &leasesapi.Resource{ ID: r.ID, Type: r.Type, }, diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go index c5d576f844..dbff7bacd9 100644 --- a/vendor/github.com/containerd/containerd/metadata/adaptors.go +++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go @@ -24,6 +24,7 @@ import ( "github.com/containerd/containerd/filters" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/sandbox" "github.com/containerd/containerd/snapshots" ) @@ -149,6 +150,23 @@ func adaptSnapshot(info snapshots.Info) filters.Adaptor { }) } +func adaptSandbox(instance *sandbox.Sandbox) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return instance.ID, true + case "labels": + return checkMap(fieldpath[1:], instance.Labels) + default: + return "", false + } + }) +} + func checkMap(fieldpath []string, m map[string]string) (string, bool) { if len(m) == 0 { return "", false diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go index 4201d7ba9e..8f8df33615 100644 --- a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go +++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go @@ -20,8 +20,10 @@ import ( "fmt" "time" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" bolt "go.etcd.io/bbolt" ) @@ -151,7 +153,7 @@ func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error { // WriteExtensions will write a KV map to the given bucket, // where `K` is a string key and `V` is a protobuf's Any type that represents a generic extension. 
-func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { +func WriteExtensions(bkt *bolt.Bucket, extensions map[string]typeurl.Any) error { if len(extensions) == 0 { return nil } @@ -162,8 +164,8 @@ func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { } for name, ext := range extensions { - ext := ext - p, err := proto.Marshal(&ext) + ext := protobuf.FromAny(ext) + p, err := proto.Marshal(ext) if err != nil { return err } @@ -177,9 +179,9 @@ func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { } // ReadExtensions will read back a map of extensions from the given bucket, previously written by WriteExtensions -func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { +func ReadExtensions(bkt *bolt.Bucket) (map[string]typeurl.Any, error) { var ( - extensions = make(map[string]types.Any) + extensions = make(map[string]typeurl.Any) ebkt = bkt.Bucket(bucketKeyExtensions) ) @@ -193,7 +195,7 @@ func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { return err } - extensions[string(k)] = t + extensions[string(k)] = &t return nil }); err != nil { return nil, err @@ -203,18 +205,19 @@ func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { } // WriteAny write a protobuf's Any type to the bucket -func WriteAny(bkt *bolt.Bucket, name []byte, any *types.Any) error { - if any == nil { +func WriteAny(bkt *bolt.Bucket, name []byte, any typeurl.Any) error { + pbany := protobuf.FromAny(any) + if pbany == nil { return nil } - data, err := proto.Marshal(any) + data, err := proto.Marshal(pbany) if err != nil { - return err + return fmt.Errorf("failed to marshal: %w", err) } if err := bkt.Put(name, data); err != nil { - return err + return fmt.Errorf("put failed: %w", err) } return nil diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go index 516de1fc7a..8dcf10f475 100644 --- a/vendor/github.com/containerd/containerd/metadata/buckets.go +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -44,75 +44,82 @@ // // Below is the current database schema. This should be updated each time // the structure is changed in addition to adding a migration and incrementing -// the database version. Note that `╘══*...*` refers to maps with arbitrary -// keys. +// the database version. 
+// Notes: // -// ├──version : - Latest version, see migrations -// └──v1 - Schema version bucket -// ╘══*namespace* -// ├──labels -// │  ╘══*key* : - Label value -// ├──image -// │  ╘══*image name* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──target -// │   │  ├──digest : - Descriptor digest -// │   │  ├──mediatype : - Descriptor media type -// │   │  └──size : - Descriptor size -// │   └──labels -// │   ╘══*key* : - Label value -// ├──containers -// │  ╘══*container id* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──spec : - Proto marshaled spec -// │   ├──image : - Image name -// │   ├──snapshotter : - Snapshotter name -// │   ├──snapshotKey : - Snapshot key -// │   ├──runtime -// │   │  ├──name : - Runtime name -// │   │  ├──extensions -// │   │  │  ╘══*name* : - Proto marshaled extension -// │   │  └──options : - Proto marshaled options -// │   └──labels -// │   ╘══*key* : - Label value -// ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* -// │    ├──name : - Snapshot name in backend -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │    ├──parent : - Parent snapshot name -// │   ├──children -// │   │  ╘══*snapshot key* : - Child snapshot reference -// │   └──labels -// │   ╘══*key* : - Label value -// ├──content -// │  ├──blob -// │  │ ╘══*blob digest* -// │  │ ├──createdat : - Created at -// │  │ ├──updatedat : - Updated at -// │  │   ├──size : - Blob size -// │  │ └──labels -// │  │ ╘══*key* : - Label value -// │  └──ingests -// │   ╘══*ingest reference* -// │    ├──ref : - Ingest reference in backend -// │   ├──expireat : - Time to expire ingest -// │   └──expected : - Expected commit digest -// └──leases -// ╘══*lease id* -//   ├──createdat : - Created at -// ├──labels -// │ ╘══*key* : - Label value -//   ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* : - Snapshot reference -//   ├──content -// │  ╘══*blob digest* : - Content blob reference -// └──ingests -//   ╘══*ingest reference* : - Content ingest reference +// - `╘══*...*` refers to maps with arbitrary keys +// +// - `version` is a key to a numeric value identifying the minor revisions +// of schema version +// +// - a namespace in a schema bucket cannot be named "version" +// +// └──v1 - Schema version bucket +// ├──version : - Latest version, see migrations +// ╘══*namespace* +// ├──labels +// │  ╘══*key* : - Label value +// ├──image +// │  ╘══*image name* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──target +// │   │  ├──digest : - Descriptor digest +// │   │  ├──mediatype : - Descriptor media type +// │   │  └──size : - Descriptor size +// │   └──labels +// │   ╘══*key* : - Label value +// ├──containers +// │  ╘══*container id* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──spec : - Proto marshaled spec +// │   ├──image : - Image name +// │   ├──snapshotter : - Snapshotter name +// │   ├──snapshotKey : - Snapshot key +// │   ├──runtime +// │   │  ├──name : - Runtime name +// │   │  ├──extensions +// │   │  │  ╘══*name* : - Proto marshaled extension +// │   │  └──options : - Proto marshaled options +// │   └──labels +// │   ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* +// │    ├──name : - Snapshot name in backend +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │    ├──parent : - Parent snapshot name +// │   ├──children +// │   │  ╘══*snapshot key* : 
- Child snapshot reference +// │   └──labels +// │   ╘══*key* : - Label value +// ├──content +// │  ├──blob +// │  │ ╘══*blob digest* +// │  │ ├──createdat : - Created at +// │  │ ├──updatedat : - Updated at +// │  │   ├──size : - Blob size +// │  │ └──labels +// │  │ ╘══*key* : - Label value +// │  └──ingests +// │   ╘══*ingest reference* +// │    ├──ref : - Ingest reference in backend +// │   ├──expireat : - Time to expire ingest +// │   └──expected : - Expected commit digest +// └──leases +// ╘══*lease id* +// ├──createdat : - Created at +// ├──labels +// │ ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* : - Snapshot reference +// ├──content +// │  ╘══*blob digest* : - Content blob reference +// └──ingests +// ╘══*ingest reference* : - Content ingest reference package metadata import ( @@ -131,6 +138,7 @@ var ( bucketKeyObjectBlob = []byte("blob") // stores content links bucketKeyObjectIngests = []byte("ingests") // stores ingest objects bucketKeyObjectLeases = []byte("leases") // stores leases + bucketKeyObjectSandboxes = []byte("sandboxes") // stores sandboxes bucketKeyDigest = []byte("digest") bucketKeyMediaType = []byte("mediatype") @@ -150,6 +158,7 @@ var ( bucketKeyExpected = []byte("expected") bucketKeyRef = []byte("ref") bucketKeyExpireAt = []byte("expireat") + bucketKeySandboxID = []byte("sandboxid") deprecatedBucketKeyObjectIngest = []byte("ingest") // stores ingest links, deprecated in v1.2 ) @@ -271,3 +280,19 @@ func createIngestBucket(tx *bolt.Tx, namespace, ref string) (*bolt.Bucket, error func getIngestBucket(tx *bolt.Tx, namespace, ref string) *bolt.Bucket { return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref)) } + +func createSandboxBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + return createBucketIfNotExists( + tx, + []byte(namespace), + bucketKeyObjectSandboxes, + ) +} + +func getSandboxBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket( + tx, + []byte(namespace), + bucketKeyObjectSandboxes, + ) +} diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go index 97002e5886..d97d9c6cd1 100644 --- a/vendor/github.com/containerd/containerd/metadata/containers.go +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -30,8 +30,9 @@ import ( "github.com/containerd/containerd/labels" "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/namespaces" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" bolt "go.etcd.io/bbolt" ) @@ -211,7 +212,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai if strings.HasPrefix(path, "extensions.") { if updated.Extensions == nil { - updated.Extensions = map[string]types.Any{} + updated.Extensions = map[string]typeurl.Any{} } key := strings.TrimPrefix(path, "extensions.") updated.Extensions[key] = container.Extensions[key] @@ -358,6 +359,8 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error { } container.Extensions = extensions + case string(bucketKeySandboxID): + container.SandboxID = string(v) } return nil @@ -406,5 +409,9 @@ func writeContainer(bkt *bolt.Bucket, container *containers.Container) error { return err } + if err := 
bkt.Put(bucketKeySandboxID, []byte(container.SandboxID)); err != nil { + return err + } + return boltutil.WriteLabels(bkt, container.Labels) } diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index 66d0ee263e..2df665fcfc 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -398,7 +398,7 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( return nil } - if cs.shared { + if cs.shared || isSharedContent(tx, wOpts.Desc.Digest) { if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil { // Ensure the expected size is the same, it is likely // an error if the size is mismatched but the caller @@ -706,6 +706,33 @@ func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) err }) } +func isSharedContent(tx *bolt.Tx, dgst digest.Digest) bool { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return false + } + // iterate through each namespace + v1c := v1bkt.Cursor() + for nk, _ := v1c.First(); nk != nil; nk, _ = v1c.Next() { + ns := string(nk) + lbkt := getNamespaceLabelsBucket(tx, ns) + if lbkt == nil { + continue + } + // iterate through each label + lbc := lbkt.Cursor() + for k, v := lbc.First(); k != nil; k, v = lbc.Next() { + if string(k) == labels.LabelSharedNamespace { + if string(v) == "true" && getBlobBucket(tx, ns, dgst) != nil { + return true + } + break + } + } + } + return false +} + func validateInfo(info *content.Info) error { for k, v := range info.Labels { if err := labels.Validate(k, v); err != nil { diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index 2d9cbf31a6..8241930a96 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -26,9 +26,13 @@ import ( "sync/atomic" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/cleanup" "github.com/containerd/containerd/snapshots" bolt "go.etcd.io/bbolt" ) @@ -56,9 +60,18 @@ func WithPolicyIsolated(o *dbOptions) { o.shared = false } +// WithEventsPublisher adds an events publisher to the +// metadata db to directly publish events +func WithEventsPublisher(p events.Publisher) DBOpt { + return func(o *dbOptions) { + o.publisher = p + } +} + // dbOptions configure db options. type dbOptions struct { - shared bool + shared bool + publisher events.Publisher } // DB represents a metadata database backed by a bolt @@ -94,6 +107,9 @@ type DB struct { // set indicating whether any dirty flags are set mutationCallbacks []func(bool) + // collectible resources + collectors map[gc.ResourceType]Collector + dbopts dbOptions } @@ -215,8 +231,8 @@ func (m *DB) ContentStore() content.Store { return m.cs } -// Snapshotter returns a namespaced content store for -// the requested snapshotter name proxied to a snapshotter. +// Snapshotter returns a snapshotter for the requested snapshotter name +// proxied to a snapshotter. 
func (m *DB) Snapshotter(name string) snapshots.Snapshotter { sn, ok := m.ss[name] if !ok { @@ -265,6 +281,63 @@ func (m *DB) RegisterMutationCallback(fn func(bool)) { m.wlock.Unlock() } +// RegisterCollectibleResource registers a resource type which can be +// referenced by metadata resources and garbage collected. +// Collectible Resources are useful ephemeral resources which need to +// be tracked by go away after reboot or process restart. +// +// A few limitations to consider: +// - Collectible Resources cannot reference other resources. +// - A failure to complete collection will not fail the garbage collection, +// however, the resources can be collected in a later run. +// - Collectible Resources must track whether the resource is active and/or +// lease membership. +func (m *DB) RegisterCollectibleResource(t gc.ResourceType, c Collector) { + if t < resourceEnd { + panic("cannot re-register metadata resource") + } else if t >= gc.ResourceMax { + panic("resource type greater than max") + } + + m.wlock.Lock() + defer m.wlock.Unlock() + + if m.collectors == nil { + m.collectors = map[gc.ResourceType]Collector{} + } + + if _, ok := m.collectors[t]; ok { + panic("cannot register collectible type twice") + } + m.collectors[t] = c +} + +// namespacedEvent is used to handle any event for a namespace +type namespacedEvent struct { + namespace string + event interface{} +} + +func (m *DB) publishEvents(events []namespacedEvent) { + ctx := context.Background() + if publisher := m.dbopts.publisher; publisher != nil { + for _, ne := range events { + ctx := namespaces.WithNamespace(ctx, ne.namespace) + var topic string + switch ne.event.(type) { + case *eventstypes.SnapshotRemove: + topic = "/snapshot/remove" + default: + log.G(ctx).WithField("event", ne.event).Debug("unhandled event type from garbage collection removal") + continue + } + if err := publisher.Publish(ctx, topic, ne.event); err != nil { + log.G(ctx).WithError(err).WithField("topic", topic).Debug("publish event failed") + } + } + } +} + // GCStats holds the duration for the different phases of the garbage collector type GCStats struct { MetaD time.Duration @@ -281,13 +354,16 @@ func (s GCStats) Elapsed() time.Duration { func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.wlock.Lock() t1 := time.Now() + c := startGCContext(ctx, m.collectors) - marked, err := m.getMarked(ctx) + marked, err := m.getMarked(ctx, c) // Pass in gc context if err != nil { m.wlock.Unlock() + c.cancel(ctx) return nil, err } + events := []namespacedEvent{} if err := m.db.Update(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -301,25 +377,43 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { if idx := strings.IndexRune(n.Key, '/'); idx > 0 { m.dirtySS[n.Key[:idx]] = struct{}{} } + // queue event to publish after successful commit } else if n.Type == ResourceContent || n.Type == ResourceIngest { m.dirtyCS = true } - return remove(ctx, tx, n) + + event, err := c.remove(ctx, tx, n) + if event != nil && err == nil { + events = append(events, + namespacedEvent{ + namespace: n.Namespace, + event: event, + }) + } + return err } - if err := scanAll(ctx, tx, rm); err != nil { + if err := c.scanAll(ctx, tx, rm); err != nil { // From gc context return fmt.Errorf("failed to scan and remove: %w", err) } return nil }); err != nil { m.wlock.Unlock() + c.cancel(ctx) return nil, err } var stats GCStats var wg sync.WaitGroup + // Flush events asynchronously after commit + wg.Add(1) + go func() { + 
m.publishEvents(events) + wg.Done() + }() + // reset dirty, no need for atomic inside of wlock.Lock m.dirty = 0 @@ -331,7 +425,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup") go func(snapshotterName string) { st1 := time.Now() - m.cleanupSnapshotter(snapshotterName) + m.cleanupSnapshotter(ctx, snapshotterName) sl.Lock() stats.SnapshotD[snapshotterName] = time.Since(st1) @@ -348,7 +442,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { log.G(ctx).Debug("schedule content cleanup") go func() { ct1 := time.Now() - m.cleanupContent() + m.cleanupContent(ctx) stats.ContentD = time.Since(ct1) wg.Done() }() @@ -358,13 +452,15 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { stats.MetaD = time.Since(t1) m.wlock.Unlock() + c.finish(ctx) + wg.Wait() return stats, err } // getMarked returns all resources that are used. -func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { +func (m *DB) getMarked(ctx context.Context, c *gcContext) (map[gc.Node]struct{}, error) { var marked map[gc.Node]struct{} if err := m.db.View(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) @@ -383,7 +479,7 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { } }() // Call roots - if err := scanRoots(ctx, tx, roots); err != nil { + if err := c.scanRoots(ctx, tx, roots); err != nil { // From gc context cancel() return err } @@ -392,7 +488,7 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { refs := func(n gc.Node) ([]gc.Node, error) { var sn []gc.Node - if err := references(ctx, tx, n, func(nn gc.Node) { + if err := c.references(ctx, tx, n, func(nn gc.Node) { // From gc context sn = append(sn, nn) }); err != nil { return nil, err @@ -412,8 +508,8 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { return marked, nil } -func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { - ctx := context.Background() +func (m *DB) cleanupSnapshotter(ctx context.Context, name string) (time.Duration, error) { + ctx = cleanup.Background(ctx) sn, ok := m.ss[name] if !ok { return 0, nil @@ -429,8 +525,8 @@ func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { return d, err } -func (m *DB) cleanupContent() (time.Duration, error) { - ctx := context.Background() +func (m *DB) cleanupContent(ctx context.Context) (time.Duration, error) { + ctx = cleanup.Background(ctx) if m.cs == nil { return 0, nil } diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index 60bf410a6d..5518a44872 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -20,9 +20,11 @@ import ( "bytes" "context" "fmt" + "sort" "strings" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" bolt "go.etcd.io/bbolt" @@ -43,6 +45,10 @@ const ( ResourceLease // ResourceIngest specifies a content ingest ResourceIngest + // resourceEnd is the end of specified resource types + resourceEnd + // ResourceStream specifies a stream + ResourceStream ) const ( @@ -52,15 +58,162 @@ const ( var ( labelGCRoot = []byte("containerd.io/gc.root") + labelGCRef = []byte("containerd.io/gc.ref.") labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") labelGCContentRef = 
[]byte("containerd.io/gc.ref.content") labelGCExpire = []byte("containerd.io/gc.expire") labelGCFlat = []byte("containerd.io/gc.flat") ) +// CollectionContext manages a resource collection during a single run of +// the garbage collector. The context is responsible for managing access to +// resources as well as tracking removal. +// Implementations should defer any longer running operations to the Finish +// function and optimize other functions for running fast during garbage +// collection write locks. +type CollectionContext interface { + // Sends all known resources + All(func(gc.Node)) + + // Active sends all active resources + // Leased resources may be excluded since lease ownership should take + // precedence over active status. + Active(namespace string, fn func(gc.Node)) + + // Leased sends all resources associated with the given lease + Leased(namespace, lease string, fn func(gc.Node)) + + // Remove marks the given resource as removed + Remove(gc.Node) + + // Cancel is called to cleanup a context after a failed collection + Cancel() error + + // Finish is called to cleanup a context after a successful collection + Finish() error +} + +// Collector is an interface to manage resource collection for any collectible +// resource registered for garbage collection. +type Collector interface { + StartCollection(context.Context) (CollectionContext, error) + + ReferenceLabel() string +} + +type gcContext struct { + labelHandlers []referenceLabelHandler + contexts map[gc.ResourceType]CollectionContext +} + +type referenceLabelHandler struct { + key []byte + fn func(string, []byte, []byte, func(gc.Node)) +} + +func startGCContext(ctx context.Context, collectors map[gc.ResourceType]Collector) *gcContext { + var contexts map[gc.ResourceType]CollectionContext + labelHandlers := []referenceLabelHandler{ + { + key: labelGCContentRef, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + if ks := string(k); ks != string(labelGCContentRef) { + // Allow reference naming separated by . or /, ignore names + if ks[len(labelGCContentRef)] != '.' && ks[len(labelGCContentRef)] != '/' { + return + } + } + + fn(gcnode(ResourceContent, ns, string(v))) + }, + }, + { + key: labelGCSnapRef, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + snapshotter := k[len(labelGCSnapRef):] + if i := bytes.IndexByte(snapshotter, '/'); i >= 0 { + snapshotter = snapshotter[:i] + } + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) + }, + }, + } + if len(collectors) > 0 { + contexts = map[gc.ResourceType]CollectionContext{} + for rt, collector := range collectors { + rt := rt + c, err := collector.StartCollection(ctx) + if err != nil { + // Only skipping this resource this round + continue + } + + if reflabel := collector.ReferenceLabel(); reflabel != "" { + key := append(labelGCRef, reflabel...) + labelHandlers = append(labelHandlers, referenceLabelHandler{ + key: key, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + if ks := string(k); ks != string(key) { + // Allow reference naming separated by . or /, ignore names + if ks[len(key)] != '.' 
&& ks[len(key)] != '/' { + return + } + } + + fn(gcnode(rt, ns, string(v))) + }, + }) + } + contexts[rt] = c + } + // Sort labelHandlers to ensure key seeking is always forwardS + sort.Slice(labelHandlers, func(i, j int) bool { + return bytes.Compare(labelHandlers[i].key, labelHandlers[j].key) < 0 + }) + } + return &gcContext{ + labelHandlers: labelHandlers, + contexts: contexts, + } +} + +func (c *gcContext) all(fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.All(fn) + } +} + +func (c *gcContext) active(namespace string, fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.Active(namespace, fn) + } +} + +func (c *gcContext) leased(namespace, lease string, fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.Leased(namespace, lease, fn) + } +} + +func (c *gcContext) cancel(ctx context.Context) { + for _, gctx := range c.contexts { + if err := gctx.Cancel(); err != nil { + log.G(ctx).WithError(err).Error("failed to cancel collection context") + } + } +} + +func (c *gcContext) finish(ctx context.Context) { + for _, gctx := range c.contexts { + if err := gctx.Finish(); err != nil { + log.G(ctx).WithError(err).Error("failed to finish collection context") + } + } +} + // scanRoots sends the given channel "root" resources that are certainly used. // The caller could look the references of the resources to find all resources that are used. -func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { +func (c *gcContext) scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { return nil @@ -170,6 +323,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { } } + c.leased(ns, string(k), fn) + return nil }); err != nil { return err @@ -188,7 +343,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { contentKey := string(target.Get(bucketKeyDigest)) fn(gcnode(ResourceContent, ns, contentKey)) } - return sendLabelRefs(ns, ibkt.Bucket(k), fn) + return c.sendLabelRefs(ns, ibkt.Bucket(k), fn) }); err != nil { return err } @@ -247,7 +402,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, ss))) } - return sendLabelRefs(ns, cibkt, fn) + return c.sendLabelRefs(ns, cibkt, fn) }); err != nil { return err } @@ -274,12 +429,28 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return err } } + + bbkt := nbkt.Bucket(bucketKeyObjectSandboxes) + if bbkt != nil { + if err := bbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + sbbkt := bbkt.Bucket(k) + return c.sendLabelRefs(ns, sbbkt, fn) + }); err != nil { + return err + } + } + + c.active(ns, fn) } return cerr } // references finds the resources that are reachable from the given node. 
-func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { +func (c *gcContext) references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { switch node.Type { case ResourceContent: bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) @@ -288,15 +459,12 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) return nil } - return sendLabelRefs(node.Namespace, bkt, fn) + return c.sendLabelRefs(node.Namespace, bkt, fn) case ResourceSnapshot, resourceSnapshotFlat: - parts := strings.SplitN(node.Key, "/", 2) - if len(parts) != 2 { + ss, name, ok := strings.Cut(node.Key, "/") + if !ok { return fmt.Errorf("invalid snapshot gc key %s", node.Key) } - ss := parts[0] - name := parts[1] - bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name)) if bkt == nil { // Node may be created from dead edge @@ -312,7 +480,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) return nil } - return sendLabelRefs(node.Namespace, bkt, fn) + return c.sendLabelRefs(node.Namespace, bkt, fn) case ResourceIngest: // Send expected value bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) @@ -332,7 +500,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } // scanAll finds all resources regardless whether the resources are used or not. -func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { +func (c *gcContext) scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { return nil @@ -409,19 +577,27 @@ func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc } } + c.all(func(n gc.Node) { + _ = fn(ctx, n) + }) + return nil } // remove all buckets for the given node. 
-func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { +func (c *gcContext) remove(ctx context.Context, tx *bolt.Tx, node gc.Node) (interface{}, error) { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { - return nil + return nil, nil } nsbkt := v1bkt.Bucket([]byte(node.Namespace)) if nsbkt == nil { - return nil + // Still remove object if refenced outside the db + if cc, ok := c.contexts[node.Type]; ok { + cc.Remove(node) + } + return nil, nil } switch node.Type { @@ -432,25 +608,28 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { } if cbkt != nil { log.G(ctx).WithField("key", node.Key).Debug("remove content") - return cbkt.DeleteBucket([]byte(node.Key)) + return nil, cbkt.DeleteBucket([]byte(node.Key)) } case ResourceSnapshot: sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots) if sbkt != nil { - parts := strings.SplitN(node.Key, "/", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid snapshot gc key %s", node.Key) + ss, key, ok := strings.Cut(node.Key, "/") + if !ok { + return nil, fmt.Errorf("invalid snapshot gc key %s", node.Key) } - ssbkt := sbkt.Bucket([]byte(parts[0])) + ssbkt := sbkt.Bucket([]byte(ss)) if ssbkt != nil { - log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot") - return ssbkt.DeleteBucket([]byte(parts[1])) + log.G(ctx).WithField("key", key).WithField("snapshotter", ss).Debug("remove snapshot") + return &eventstypes.SnapshotRemove{ + Key: key, + Snapshotter: ss, + }, ssbkt.DeleteBucket([]byte(key)) } } case ResourceLease: lbkt := nsbkt.Bucket(bucketKeyObjectLeases) if lbkt != nil { - return lbkt.DeleteBucket([]byte(node.Key)) + return nil, lbkt.DeleteBucket([]byte(node.Key)) } case ResourceIngest: ibkt := nsbkt.Bucket(bucketKeyObjectContent) @@ -459,39 +638,31 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { } if ibkt != nil { log.G(ctx).WithField("ref", node.Key).Debug("remove ingest") - return ibkt.DeleteBucket([]byte(node.Key)) + return nil, ibkt.DeleteBucket([]byte(node.Key)) + } + default: + cc, ok := c.contexts[node.Type] + if ok { + cc.Remove(node) + } else { + log.G(ctx).WithField("ref", node.Key).WithField("type", node.Type).Info("no remove defined for resource") } } - return nil + return nil, nil } // sendLabelRefs sends all snapshot and content references referred to by the labels in the bkt -func sendLabelRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { +func (c *gcContext) sendLabelRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { lbkt := bkt.Bucket(bucketKeyObjectLabels) if lbkt != nil { lc := lbkt.Cursor() - - labelRef := string(labelGCContentRef) - for k, v := lc.Seek(labelGCContentRef); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { - if ks := string(k); ks != labelRef { - // Allow reference naming separated by . or /, ignore names - if ks[len(labelRef)] != '.' 
&& ks[len(labelRef)] != '/' { - continue - } + for i := range c.labelHandlers { + labelRef := string(c.labelHandlers[i].key) + for k, v := lc.Seek(c.labelHandlers[i].key); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { + c.labelHandlers[i].fn(ns, k, v, fn) } - - fn(gcnode(ResourceContent, ns, string(v))) } - - for k, v := lc.Seek(labelGCSnapRef); k != nil && strings.HasPrefix(string(k), string(labelGCSnapRef)); k, v = lc.Next() { - snapshotter := k[len(labelGCSnapRef):] - if i := bytes.IndexByte(snapshotter, '/'); i >= 0 { - snapshotter = snapshotter[:i] - } - fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) - } - } return nil } diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go index 8355b712ef..ff5b624cce 100644 --- a/vendor/github.com/containerd/containerd/metadata/images.go +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -31,6 +31,7 @@ import ( "github.com/containerd/containerd/labels" "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/epoch" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" bolt "go.etcd.io/bbolt" @@ -144,7 +145,15 @@ func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Ima return fmt.Errorf("image %q: %w", image.Name, errdefs.ErrAlreadyExists) } - image.CreatedAt = time.Now().UTC() + // The value of `image.CreatedAt` passed from the caller is discarded here. + // Ideally we should return an error when the value is already set. + // However, as `image.CreatedAt` is defined as a non-pointer `time.Time`, we can't compare it to nil. + // And we can't compare it to `time.Time{}` either, as `time.Time{}` is a proper timestamp (1970-01-01 00:00:00). + if tm := epoch.FromContext(ctx); tm != nil { + image.CreatedAt = tm.UTC() + } else { + image.CreatedAt = time.Now().UTC() + } image.UpdatedAt = image.CreatedAt return writeImage(ibkt, &image) }); err != nil { @@ -228,7 +237,11 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths } updated.CreatedAt = createdat - updated.UpdatedAt = time.Now().UTC() + if tm := epoch.FromContext(ctx); tm != nil { + updated.UpdatedAt = tm.UTC() + } else { + updated.UpdatedAt = time.Now().UTC() + } return writeImage(ibkt, &updated) }); err != nil { return images.Image{}, err diff --git a/vendor/github.com/containerd/containerd/metadata/sandbox.go b/vendor/github.com/containerd/containerd/metadata/sandbox.go new file mode 100644 index 0000000000..5766647d33 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/sandbox.go @@ -0,0 +1,373 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + api "github.com/containerd/containerd/sandbox" + "github.com/containerd/typeurl/v2" + "go.etcd.io/bbolt" +) + +type sandboxStore struct { + db *DB +} + +var _ api.Store = (*sandboxStore)(nil) + +// NewSandboxStore creates a datababase client for sandboxes +func NewSandboxStore(db *DB) api.Store { + return &sandboxStore{db: db} +} + +// Create a sandbox record in the store +func (s *sandboxStore) Create(ctx context.Context, sandbox api.Sandbox) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + sandbox.CreatedAt = time.Now().UTC() + sandbox.UpdatedAt = sandbox.CreatedAt + + if err := s.validate(&sandbox); err != nil { + return api.Sandbox{}, fmt.Errorf("failed to validate sandbox: %w", err) + } + + if err := s.db.Update(func(tx *bbolt.Tx) error { + parent, err := createSandboxBucket(tx, ns) + if err != nil { + return fmt.Errorf("create error: %w", err) + } + + if err := s.write(parent, &sandbox, false); err != nil { + return fmt.Errorf("write error: %w", err) + } + + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return sandbox, nil +} + +// Update the sandbox with the provided sandbox object and fields +func (s *sandboxStore) Update(ctx context.Context, sandbox api.Sandbox, fieldpaths ...string) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + ret := api.Sandbox{} + if err := update(ctx, s.db, func(tx *bbolt.Tx) error { + parent := getSandboxBucket(tx, ns) + if parent == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + updated, err := s.read(parent, []byte(sandbox.ID)) + if err != nil { + return err + } + + if len(fieldpaths) == 0 { + fieldpaths = []string{"labels", "extensions", "spec", "runtime"} + + if updated.Runtime.Name != sandbox.Runtime.Name { + return fmt.Errorf("sandbox.Runtime.Name field is immutable: %w", errdefs.ErrInvalidArgument) + } + } + + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = sandbox.Labels[key] + continue + } else if strings.HasPrefix(path, "extensions.") { + if updated.Extensions == nil { + updated.Extensions = map[string]typeurl.Any{} + } + + key := strings.TrimPrefix(path, "extensions.") + updated.Extensions[key] = sandbox.Extensions[key] + continue + } + + switch path { + case "labels": + updated.Labels = sandbox.Labels + case "extensions": + updated.Extensions = sandbox.Extensions + case "runtime": + updated.Runtime = sandbox.Runtime + case "spec": + updated.Spec = sandbox.Spec + default: + return fmt.Errorf("cannot update %q field on sandbox %q: %w", path, sandbox.ID, errdefs.ErrInvalidArgument) + } + } + + updated.UpdatedAt = time.Now().UTC() + + if err := s.validate(&updated); err != nil { + return err + } + + if err := s.write(parent, &updated, true); err != nil { + return err + } + + ret = updated + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return ret, nil +} + +// Get sandbox metadata using the id +func (s *sandboxStore) Get(ctx 
context.Context, id string) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + ret := api.Sandbox{} + if err := view(ctx, s.db, func(tx *bbolt.Tx) error { + bucket := getSandboxBucket(tx, ns) + if bucket == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + out, err := s.read(bucket, []byte(id)) + if err != nil { + return err + } + + ret = out + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return ret, nil +} + +// List returns sandboxes that match one or more of the provided filters +func (s *sandboxStore) List(ctx context.Context, fields ...string) ([]api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fields...) + if err != nil { + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) + } + + var ( + list []api.Sandbox + ) + + if err := view(ctx, s.db, func(tx *bbolt.Tx) error { + bucket := getSandboxBucket(tx, ns) + if bucket == nil { + // We haven't created any sandboxes yet, just return empty list + return nil + } + + if err := bucket.ForEach(func(k, v []byte) error { + info, err := s.read(bucket, k) + if err != nil { + return fmt.Errorf("failed to read bucket %q: %w", string(k), err) + } + + if filter.Match(adaptSandbox(&info)) { + list = append(list, info) + } + + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + + return list, nil +} + +// Delete a sandbox from metadata store using the id +func (s *sandboxStore) Delete(ctx context.Context, id string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + if err := update(ctx, s.db, func(tx *bbolt.Tx) error { + buckets := getSandboxBucket(tx, ns) + if buckets == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + if err := buckets.DeleteBucket([]byte(id)); err != nil { + return fmt.Errorf("failed to delete sandbox %q: %w", id, err) + } + + return nil + }); err != nil { + return err + } + + return nil +} + +func (s *sandboxStore) write(parent *bbolt.Bucket, instance *api.Sandbox, overwrite bool) error { + var ( + bucket *bbolt.Bucket + err error + id = []byte(instance.ID) + ) + + if overwrite { + bucket, err = parent.CreateBucketIfNotExists(id) + if err != nil { + return err + } + } else { + bucket = parent.Bucket(id) + if bucket != nil { + return fmt.Errorf("sandbox bucket %q already exists: %w", instance.ID, errdefs.ErrAlreadyExists) + } + + bucket, err = parent.CreateBucket(id) + if err != nil { + return err + } + } + + if err := boltutil.WriteTimestamps(bucket, instance.CreatedAt, instance.UpdatedAt); err != nil { + return err + } + + if err := boltutil.WriteLabels(bucket, instance.Labels); err != nil { + return err + } + + if err := boltutil.WriteExtensions(bucket, instance.Extensions); err != nil { + return err + } + + if err := boltutil.WriteAny(bucket, bucketKeySpec, instance.Spec); err != nil { + return err + } + + runtimeBucket, err := bucket.CreateBucketIfNotExists(bucketKeyRuntime) + if err != nil { + return err + } + + if err := runtimeBucket.Put(bucketKeyName, []byte(instance.Runtime.Name)); err != nil { + return err + } + + if err := boltutil.WriteAny(runtimeBucket, bucketKeyOptions, instance.Runtime.Options); err != nil { + return err + } + + return nil +} + +func (s *sandboxStore) read(parent *bbolt.Bucket, id []byte) (api.Sandbox, error) { + var ( + inst 
api.Sandbox + err error + ) + + bucket := parent.Bucket(id) + if bucket == nil { + return api.Sandbox{}, fmt.Errorf("bucket %q not found: %w", id, errdefs.ErrNotFound) + } + + inst.ID = string(id) + + inst.Labels, err = boltutil.ReadLabels(bucket) + if err != nil { + return api.Sandbox{}, err + } + + if err := boltutil.ReadTimestamps(bucket, &inst.CreatedAt, &inst.UpdatedAt); err != nil { + return api.Sandbox{}, err + } + + inst.Spec, err = boltutil.ReadAny(bucket, bucketKeySpec) + if err != nil { + return api.Sandbox{}, err + } + + runtimeBucket := bucket.Bucket(bucketKeyRuntime) + if runtimeBucket == nil { + return api.Sandbox{}, errors.New("no runtime bucket") + } + + inst.Runtime.Name = string(runtimeBucket.Get(bucketKeyName)) + inst.Runtime.Options, err = boltutil.ReadAny(runtimeBucket, bucketKeyOptions) + if err != nil { + return api.Sandbox{}, err + } + + inst.Extensions, err = boltutil.ReadExtensions(bucket) + if err != nil { + return api.Sandbox{}, err + } + + return inst, nil +} + +func (s *sandboxStore) validate(new *api.Sandbox) error { + if err := identifiers.Validate(new.ID); err != nil { + return fmt.Errorf("invalid sandbox ID: %w", err) + } + + if new.CreatedAt.IsZero() { + return fmt.Errorf("creation date must not be zero: %w", errdefs.ErrInvalidArgument) + } + + if new.UpdatedAt.IsZero() { + return fmt.Errorf("updated date must not be zero: %w", errdefs.ErrInvalidArgument) + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 348602093a..e7774d36ec 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/labels" @@ -273,7 +274,22 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er } func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - return s.createSnapshot(ctx, key, parent, false, opts) + mounts, err := s.createSnapshot(ctx, key, parent, false, opts) + if err != nil { + return nil, err + } + + if s.db.dbopts.publisher != nil { + if err := s.db.dbopts.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ + Key: key, + Parent: parent, + Snapshotter: s.name, + }); err != nil { + return nil, err + } + } + + return mounts, nil } func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { @@ -618,6 +634,16 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } + if s.db.dbopts.publisher != nil { + if err := s.db.dbopts.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ + Key: key, + Name: name, + Snapshotter: s.name, + }); err != nil { + return err + } + } + return nil } @@ -631,7 +657,7 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { return err } - return update(ctx, s.db, func(tx *bolt.Tx) error { + if err := update(ctx, s.db, func(tx *bolt.Tx) error { var sbkt *bolt.Bucket bkt := getSnapshotterBucket(tx, ns, s.name) if bkt != nil { @@ -674,7 +700,17 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { s.db.dirtySS[s.name] = struct{}{} return nil - }) + }); err != nil { + return 
err + } + + if s.db.dbopts.publisher != nil { + return s.db.dbopts.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ + Key: key, + Snapshotter: s.name, + }) + } + return nil } type infoPair struct { diff --git a/vendor/github.com/containerd/containerd/mount/fmountat_linux.go b/vendor/github.com/containerd/containerd/mount/fmountat_linux.go deleted file mode 100644 index 850a92acf6..0000000000 --- a/vendor/github.com/containerd/containerd/mount/fmountat_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package mount - -import ( - "fmt" - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "golang.org/x/sys/unix" -) - -// fMountat performs mount from the provided directory. -func fMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return fmt.Errorf("failed to open pipe: %w", err) - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return fmt.Errorf("failed to fork thread: %w", errno) - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return fmt.Errorf("failed to read pipe: %w", errno) - } - - if status != 0 { - return fmt.Errorf("failed to mount: %w", status) - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. -func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. 
- pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/mount/lookup_unix.go index 44881750b2..6fb16f6dd1 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unix.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go index 2e954b1ae5..1daf96d5c9 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/losetup_linux.go b/vendor/github.com/containerd/containerd/mount/losetup_linux.go index 5bfd7f1893..e3647e9543 100644 --- a/vendor/github.com/containerd/containerd/mount/losetup_linux.go +++ b/vendor/github.com/containerd/containerd/mount/losetup_linux.go @@ -21,9 +21,7 @@ import ( "fmt" "os" "strings" - "syscall" "time" - "unsafe" "github.com/containerd/containerd/pkg/randutil" "golang.org/x/sys/unix" @@ -47,22 +45,13 @@ type LoopParams struct { Direct bool } -func ioctl(fd, req, args uintptr) (uintptr, uintptr, error) { - r1, r2, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, req, args) - if errno != 0 { - return 0, 0, errno - } - - return r1, r2, nil -} - func getFreeLoopDev() (uint32, error) { ctrl, err := os.OpenFile(loopControlPath, os.O_RDWR, 0) if err != nil { return 0, fmt.Errorf("could not open %v: %v", loopControlPath, err) } defer ctrl.Close() - num, _, err := ioctl(ctrl.Fd(), unix.LOOP_CTL_GET_FREE, 0) + num, err := unix.IoctlRetInt(int(ctrl.Fd()), unix.LOOP_CTL_GET_FREE) if err != nil { return 0, fmt.Errorf("could not get free loop device: %w", err) } @@ -96,10 +85,16 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re }() // 2. Set FD - if _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_FD, back.Fd()); err != nil { + if err := unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_FD, int(back.Fd())); err != nil { return nil, fmt.Errorf("could not set loop fd for device: %s: %w", loopDev, err) } + defer func() { + if retErr != nil { + _ = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_CLR_FD, 0) + } + }() + // 3. 
Set Info info := unix.LoopInfo64{} copy(info.File_name[:], backingFile) @@ -111,27 +106,20 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re info.Flags |= unix.LO_FLAGS_AUTOCLEAR } - if param.Direct { - info.Flags |= unix.LO_FLAGS_DIRECT_IO - } - - _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_STATUS64, uintptr(unsafe.Pointer(&info))) - if err == nil { - return loop, nil + err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info) + if err != nil { + return nil, fmt.Errorf("failed to set loop device info: %w", err) } + // 4. Set Direct IO if param.Direct { - // Retry w/o direct IO flag in case kernel does not support it. The downside is that - // it will suffer from double cache problem. - info.Flags &= ^(uint32(unix.LO_FLAGS_DIRECT_IO)) - _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_STATUS64, uintptr(unsafe.Pointer(&info))) - if err == nil { - return loop, nil + err = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_DIRECT_IO, 1) + if err != nil { + return nil, fmt.Errorf("failed to setup loop with direct: %w", err) } } - _, _, _ = ioctl(loop.Fd(), unix.LOOP_CLR_FD, 0) - return nil, fmt.Errorf("failed to set loop device info: %v", err) + return loop, nil } // setupLoop looks for (and possibly creates) a free loop device, and @@ -182,8 +170,7 @@ func removeLoop(loopdev string) error { } defer file.Close() - _, _, err = ioctl(file.Fd(), unix.LOOP_CLR_FD, 0) - return err + return unix.IoctlSetInt(int(file.Fd()), unix.LOOP_CLR_FD, 0) } // AttachLoopDevice attaches a specified backing file to a loop device diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go index 9dd4f32683..ae7520f980 100644 --- a/vendor/github.com/containerd/containerd/mount/mount.go +++ b/vendor/github.com/containerd/containerd/mount/mount.go @@ -17,7 +17,10 @@ package mount import ( + "fmt" "strings" + + "github.com/containerd/continuity/fs" ) // Mount is the lingua franca of containerd. A mount represents a @@ -28,12 +31,16 @@ type Mount struct { // Source specifies where to mount from. Depending on the host system, this // can be a source path or device. Source string + // Target specifies an optional subdirectory as a mountpoint. It assumes that + // the subdirectory exists in a parent mount. + Target string // Options contains zero or more fstab-style mount options. Typically, // these are platform specific. Options []string } -// All mounts all the provided mounts to the provided target +// All mounts all the provided mounts to the provided target. If submounts are +// present, it assumes that parent mounts come before child mounts. func All(mounts []Mount, target string) error { for _, m := range mounts { if err := m.Mount(target); err != nil { @@ -43,6 +50,44 @@ func All(mounts []Mount, target string) error { return nil } +// UnmountMounts unmounts all the mounts under a target in the reverse order of +// the mounts array provided. +func UnmountMounts(mounts []Mount, target string, flags int) error { + for i := len(mounts) - 1; i >= 0; i-- { + mountpoint, err := fs.RootPath(target, mounts[i].Target) + if err != nil { + return err + } + + if err := UnmountAll(mountpoint, flags); err != nil { + if i == len(mounts)-1 { // last mount + return err + } + } + } + return nil +} + +// ReadOnly returns a boolean value indicating whether this mount has the "ro" +// option set. 
+func (m *Mount) ReadOnly() bool { + for _, option := range m.Options { + if option == "ro" { + return true + } + } + return false +} + +// Mount to the provided target path. +func (m *Mount) Mount(target string) error { + target, err := fs.RootPath(target, m.Target) + if err != nil { + return fmt.Errorf("failed to join path %q with root %q: %w", m.Target, target, err) + } + return m.mount(target) +} + // readonlyMounts modifies the received mount options // to make them readonly func readonlyMounts(mounts []Mount) []Mount { diff --git a/vendor/github.com/containerd/containerd/mount/mount_freebsd.go b/vendor/github.com/containerd/containerd/mount/mount_freebsd.go index 3711383c61..f41df3dea4 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_freebsd.go +++ b/vendor/github.com/containerd/containerd/mount/mount_freebsd.go @@ -31,15 +31,12 @@ var ( ErrNotImplementOnUnix = errors.New("not implemented under unix") ) -// Mount to the provided target path. -func (m *Mount) Mount(target string) error { - // The "syscall" and "golang.org/x/sys/unix" packages do not define a Mount - // function for FreeBSD, so instead we execute mount(8) and trust it to do - // the right thing - return m.mountWithHelper(target) -} - -func (m *Mount) mountWithHelper(target string) error { +// Mount to the provided target. +// +// The "syscall" and "golang.org/x/sys/unix" packages do not define a Mount +// function for FreeBSD, so instead we execute mount(8) and trust it to do +// the right thing +func (m *Mount) mount(target string) error { // target: "/foo/target" // command: "mount -o ro -t nullfs /foo/source /foo/merged" // Note: FreeBSD mount(8) is particular about the order of flags and arguments diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go index a69f65c2dd..b58e249d9f 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path" + "runtime" "strings" "time" @@ -41,7 +42,7 @@ func init() { // // If m.Type starts with "fuse." or "fuse3.", "mount.fuse" or "mount.fuse3" // helper binary is called. -func (m *Mount) Mount(target string) (err error) { +func (m *Mount) mount(target string) (err error) { for _, helperBinary := range allowedHelperBinaries { // helperBinary = "mount.fuse", typePrefix = "fuse." typePrefix := strings.TrimPrefix(helperBinary, "mount.") + "." @@ -62,9 +63,6 @@ func (m *Mount) Mount(target string) (err error) { } flags, data, losetup := parseMountOptions(options) - if len(data) > pagesize { - return errors.New("mount options is too long") - } // propagation types. const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE @@ -72,15 +70,27 @@ func (m *Mount) Mount(target string) (err error) { // Ensure propagation type change flags aren't included in other calls. oflags := flags &^ ptypes + var loopParams LoopParams + if losetup { + loopParams = LoopParams{ + Readonly: oflags&unix.MS_RDONLY == unix.MS_RDONLY, + Autoclear: true, + } + loopParams.Direct, data = hasDirectIO(data) + } + + dataInStr := strings.Join(data, ",") + if len(dataInStr) > pagesize { + return errors.New("mount options is too long") + } + // In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077). 
- if flags&unix.MS_REMOUNT == 0 || data != "" { + if flags&unix.MS_REMOUNT == 0 || dataInStr != "" { // Initial call applying all non-propagation flags for mount // or remount with changed data source := m.Source if losetup { - loFile, err := setupLoop(m.Source, LoopParams{ - Readonly: oflags&unix.MS_RDONLY == unix.MS_RDONLY, - Autoclear: true}) + loFile, err := setupLoop(m.Source, loopParams) if err != nil { return err } @@ -89,7 +99,7 @@ func (m *Mount) Mount(target string) (err error) { // Mount the loop device instead source = loFile.Name() } - if err := mountAt(chdir, source, target, m.Type, uintptr(oflags), data); err != nil { + if err := mountAt(chdir, source, target, m.Type, uintptr(oflags), dataInStr); err != nil { return err } } @@ -198,7 +208,7 @@ func UnmountAll(mount string, flags int) error { // parseMountOptions takes fstab style mount options and parses them for // use with a standard mount() syscall -func parseMountOptions(options []string) (int, string, bool) { +func parseMountOptions(options []string) (int, []string, bool) { var ( flag int losetup bool @@ -251,7 +261,16 @@ func parseMountOptions(options []string) (int, string, bool) { data = append(data, o) } } - return flag, strings.Join(data, ","), losetup + return flag, data, losetup +} + +func hasDirectIO(opts []string) (bool, []string) { + for idx, opt := range opts { + if opt == "direct-io" { + return true, append(opts[:idx], opts[idx+1:]...) + } + } + return false, opts } // compactLowerdirOption updates overlay lowdir option and returns the common @@ -363,24 +382,29 @@ func mountAt(chdir string, source, target, fstype string, flags uintptr, data st return unix.Mount(source, target, fstype, flags, data) } - f, err := os.Open(chdir) - if err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } - defer f.Close() + ch := make(chan error, 1) + go func() { + runtime.LockOSThread() - fs, err := f.Stat() - if err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } + // Do not unlock this thread. + // If the thread is unlocked go will try to use it for other goroutines. + // However it is not possible to restore the thread state after CLONE_FS. + // + // Once the goroutine exits the thread should eventually be terminated by go. - if !fs.IsDir() { - return fmt.Errorf("failed to mountat: %s is not dir", chdir) - } - if err := fMountat(f.Fd(), source, target, fstype, flags, data); err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } - return nil + if err := unix.Unshare(unix.CLONE_FS); err != nil { + ch <- err + return + } + + if err := unix.Chdir(chdir); err != nil { + ch <- err + return + } + + ch <- unix.Mount(source, target, fstype, flags, data) + }() + return <-ch } func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go index 795bb4bfe1..46dfd1c343 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_unix.go +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -1,5 +1,4 @@ -//go:build darwin || openbsd -// +build darwin openbsd +//go:build !windows && !darwin && !openbsd /* Copyright The containerd Authors. 
@@ -19,24 +18,44 @@ package mount -import "errors" +import ( + "sort" -var ( - // ErrNotImplementOnUnix is returned for methods that are not implemented - ErrNotImplementOnUnix = errors.New("not implemented under unix") + "github.com/moby/sys/mountinfo" ) -// Mount is not implemented on this platform -func (m *Mount) Mount(target string) error { - return ErrNotImplementOnUnix -} +// UnmountRecursive unmounts the target and all mounts underneath, starting +// with the deepest mount first. +func UnmountRecursive(target string, flags int) error { + if target == "" { + return nil + } + mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target)) + if err != nil { + return err + } -// Unmount is not implemented on this platform -func Unmount(mount string, flags int) error { - return ErrNotImplementOnUnix -} + targetSet := make(map[string]struct{}) + for _, m := range mounts { + targetSet[m.Mountpoint] = struct{}{} + } -// UnmountAll is not implemented on this platform -func UnmountAll(mount string, flags int) error { - return ErrNotImplementOnUnix + var targets []string + for m := range targetSet { + targets = append(targets, m) + } + + // Make the deepest mount be first + sort.SliceStable(targets, func(i, j int) bool { + return len(targets[i]) > len(targets[j]) + }) + + for i, target := range targets { + if err := UnmountAll(target, flags); err != nil { + if i == len(targets)-1 { // last mount + return err + } + } + } + return nil } diff --git a/vendor/github.com/containerd/containerd/mount/mount_unsupported.go b/vendor/github.com/containerd/containerd/mount/mount_unsupported.go new file mode 100644 index 0000000000..894467a993 --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mount_unsupported.go @@ -0,0 +1,46 @@ +//go:build darwin || openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package mount + +import "errors" + +var ( + // ErrNotImplementOnUnix is returned for methods that are not implemented + ErrNotImplementOnUnix = errors.New("not implemented under unix") +) + +// Mount is not implemented on this platform +func (m *Mount) mount(target string) error { + return ErrNotImplementOnUnix +} + +// Unmount is not implemented on this platform +func Unmount(mount string, flags int) error { + return ErrNotImplementOnUnix +} + +// UnmountAll is not implemented on this platform +func UnmountAll(mount string, flags int) error { + return ErrNotImplementOnUnix +} + +// UnmountRecursive is not implemented on this platform +func UnmountRecursive(mount string, flags int) error { + return ErrNotImplementOnUnix +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go index 87fed8268e..7c24fa600c 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_windows.go +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -17,6 +17,7 @@ package mount import ( + "context" "encoding/json" "errors" "fmt" @@ -24,16 +25,21 @@ import ( "path/filepath" "strings" + "github.com/Microsoft/go-winio/pkg/bindfilter" "github.com/Microsoft/hcsshim" + "github.com/containerd/containerd/log" + "golang.org/x/sys/windows" ) +const sourceStreamName = "containerd.io-source" + var ( // ErrNotImplementOnWindows is returned when an action is not implemented for windows ErrNotImplementOnWindows = errors.New("not implemented under windows") ) -// Mount to the provided target -func (m *Mount) Mount(target string) error { +// Mount to the provided target. +func (m *Mount) mount(target string) (retErr error) { if m.Type != "windows-layer" { return fmt.Errorf("invalid windows mount type: '%s'", m.Type) } @@ -49,24 +55,60 @@ func (m *Mount) Mount(target string) error { HomeDir: home, } - if err = hcsshim.ActivateLayer(di, layerID); err != nil { + if err := hcsshim.ActivateLayer(di, layerID); err != nil { return fmt.Errorf("failed to activate layer %s: %w", m.Source, err) } + defer func() { + if retErr != nil { + if layerErr := hcsshim.DeactivateLayer(di, layerID); layerErr != nil { + log.G(context.TODO()).WithError(layerErr).Error("failed to deactivate layer during mount failure cleanup") + } + } + }() - if err = hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { + if err := hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { return fmt.Errorf("failed to prepare layer %s: %w", m.Source, err) } - // We can link the layer mount path to the given target. It is an UNC path, and it needs - // a trailing backslash. - mountPath, err := hcsshim.GetLayerMountPath(di, layerID) + defer func() { + if retErr != nil { + if layerErr := hcsshim.UnprepareLayer(di, layerID); layerErr != nil { + log.G(context.TODO()).WithError(layerErr).Error("failed to unprepare layer during mount failure cleanup") + } + } + }() + + volume, err := hcsshim.GetLayerMountPath(di, layerID) if err != nil { - return fmt.Errorf("failed to get layer mount path for %s: %w", m.Source, err) + return fmt.Errorf("failed to get volume path for layer %s: %w", m.Source, err) } - mountPath = mountPath + `\` - if err = os.Symlink(mountPath, target); err != nil { - return fmt.Errorf("failed to link mount to taget %s: %w", target, err) + + if len(parentLayerPaths) == 0 { + // this is a base layer. It gets mounted without going through WCIFS. 
We need to mount the Files + // folder, not the actual source, or the client may inadvertently remove metadata files. + volume = filepath.Join(volume, "Files") + if _, err := os.Stat(volume); err != nil { + return fmt.Errorf("no Files folder in layer %s", layerID) + } } + if err := bindfilter.ApplyFileBinding(target, volume, m.ReadOnly()); err != nil { + return fmt.Errorf("failed to set volume mount path for layer %s: %w", m.Source, err) + } + defer func() { + if retErr != nil { + if bindErr := bindfilter.RemoveFileBinding(target); bindErr != nil { + log.G(context.TODO()).WithError(bindErr).Error("failed to remove binding during mount failure cleanup") + } + } + }() + + // Add an Alternate Data Stream to record the layer source. + // See https://docs.microsoft.com/en-au/archive/blogs/askcore/alternate-data-streams-in-ntfs + // for details on Alternate Data Streams. + if err := os.WriteFile(filepath.Clean(target)+":"+sourceStreamName, []byte(m.Source), 0666); err != nil { + return fmt.Errorf("failed to record source for layer %s: %w", m.Source, err) + } + return nil } @@ -90,24 +132,59 @@ func (m *Mount) GetParentPaths() ([]string, error) { // Unmount the mount at the provided path func Unmount(mount string, flags int) error { - var ( - home, layerID = filepath.Split(mount) - di = hcsshim.DriverInfo{ - HomeDir: home, + mount = filepath.Clean(mount) + adsFile := mount + ":" + sourceStreamName + var layerPath string + + if _, err := os.Lstat(adsFile); err == nil { + layerPathb, err := os.ReadFile(mount + ":" + sourceStreamName) + if err != nil { + return fmt.Errorf("failed to retrieve source for layer %s: %w", mount, err) } - ) - - if err := hcsshim.UnprepareLayer(di, layerID); err != nil { - return fmt.Errorf("failed to unprepare layer %s: %w", mount, err) - } - if err := hcsshim.DeactivateLayer(di, layerID); err != nil { - return fmt.Errorf("failed to deactivate layer %s: %w", mount, err) + layerPath = string(layerPathb) } + if err := bindfilter.RemoveFileBinding(mount); err != nil { + if errors.Is(err, windows.ERROR_INVALID_PARAMETER) || errors.Is(err, windows.ERROR_NOT_FOUND) { + // not a mount point + return nil + } + return fmt.Errorf("removing mount: %w", err) + } + + if layerPath != "" { + var ( + home, layerID = filepath.Split(layerPath) + di = hcsshim.DriverInfo{ + HomeDir: home, + } + ) + + if err := hcsshim.UnprepareLayer(di, layerID); err != nil { + return fmt.Errorf("failed to unprepare layer %s: %w", mount, err) + } + + if err := hcsshim.DeactivateLayer(di, layerID); err != nil { + return fmt.Errorf("failed to deactivate layer %s: %w", mount, err) + } + } return nil } // UnmountAll unmounts from the provided path func UnmountAll(mount string, flags int) error { + if mount == "" { + // This isn't an error, per the EINVAL handling in the Linux version + return nil + } + if _, err := os.Stat(mount); os.IsNotExist(err) { + return nil + } + return Unmount(mount, flags) } + +// UnmountRecursive unmounts from the provided path +func UnmountRecursive(mount string, flags int) error { + return UnmountAll(mount, flags) +} diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go index 889d49c1ad..83143521ab 100644 --- a/vendor/github.com/containerd/containerd/mount/temp.go +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -49,7 +49,7 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro // We should do defer first, if not we will not do Unmount when only a part of Mounts are failed. 
defer func() { - if uerr = UnmountAll(root, 0); uerr != nil { + if uerr = UnmountMounts(mounts, root, 0); uerr != nil { uerr = fmt.Errorf("failed to unmount %s: %w", root, uerr) if err == nil { err = uerr diff --git a/vendor/github.com/containerd/containerd/mount/temp_unix.go b/vendor/github.com/containerd/containerd/mount/temp_unix.go index e969700818..5ddd2cd160 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unix.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go index feec90a76f..3ccc0444ff 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go @@ -1,5 +1,4 @@ //go:build windows || darwin -// +build windows darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index 4c66406b08..83ee828dd0 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -23,7 +23,7 @@ import ( api "github.com/containerd/containerd/api/services/namespaces/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/types" ) // NewNamespaceStoreFromClient returns a new namespace store @@ -38,7 +38,7 @@ type remoteNamespaces struct { func (r *remoteNamespaces) Create(ctx context.Context, namespace string, labels map[string]string) error { var req api.CreateNamespaceRequest - req.Namespace = api.Namespace{ + req.Namespace = &api.Namespace{ Name: namespace, Labels: labels, } @@ -66,7 +66,7 @@ func (r *remoteNamespaces) Labels(ctx context.Context, namespace string) (map[st func (r *remoteNamespaces) SetLabel(ctx context.Context, namespace, key, value string) error { var req api.UpdateNamespaceRequest - req.Namespace = api.Namespace{ + req.Namespace = &api.Namespace{ Name: namespace, Labels: map[string]string{key: value}, } diff --git a/vendor/github.com/containerd/containerd/oci/mounts.go b/vendor/github.com/containerd/containerd/oci/mounts.go index 83dd0d0b10..8c758f4333 100644 --- a/vendor/github.com/containerd/containerd/oci/mounts.go +++ b/vendor/github.com/containerd/containerd/oci/mounts.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd /* Copyright The containerd Authors. @@ -69,3 +68,6 @@ func defaultMounts() []specs.Mount { }, } } + +// appendOSMounts is only used on FreeBSD, and a no-op on other platforms. 
+func appendOSMounts(_ *Spec, _ string) {} diff --git a/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go b/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go index 42b9d7affd..6675c55161 100644 --- a/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go +++ b/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go @@ -32,7 +32,34 @@ func defaultMounts() []specs.Mount { Destination: "/dev/fd", Type: "fdescfs", Source: "fdescfs", - Options: []string{}, }, } } + +// appendOSMounts modifies the mount spec to mount emulated Linux filesystems on FreeBSD, +// as per: https://wiki.freebsd.org/LinuxJails +func appendOSMounts(s *Spec, os string) { + // No-op for FreeBSD containers + if os != "linux" { + return + } + /* The nosuid noexec options are for consistency with Linux mounts: on FreeBSD it is + by default impossible to execute anything from these filesystems. + */ + var mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "linprocfs", + Source: "linprocfs", + Options: []string{"nosuid", "noexec"}, + }, + { + Destination: "/sys", + Type: "linsysfs", + Source: "linsysfs", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + + s.Mounts = append(mounts, s.Mounts...) +} diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index a1c98ddcbd..bee3b44d6a 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -21,11 +21,11 @@ import ( "path/filepath" "runtime" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/containerd/containerd/containers" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" ) const ( @@ -66,15 +66,19 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s return err } - if plat.OS == "windows" { + switch plat.OS { + case "windows": err = populateDefaultWindowsSpec(ctx, s, id) - } else { + case "darwin": + err = populateDefaultDarwinSpec(s) + default: err = populateDefaultUnixSpec(ctx, s, id) if err == nil && runtime.GOOS == "windows" { // To run LCOW we have a Linux and Windows section. Add an empty one now. 
s.Windows = &specs.Windows{} } } + return err } @@ -207,3 +211,12 @@ func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error { } return nil } + +func populateDefaultDarwinSpec(s *Spec) error { + *s = Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Process: &specs.Process{Cwd: "/"}, + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index 65811fc23d..f388289885 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -82,6 +82,10 @@ func setResources(s *Spec) { s.Linux.Resources = &specs.LinuxResources{} } } +} + +//nolint:nolintlint,unused // not used on all platforms +func setResourcesWindows(s *Spec) { if s.Windows != nil { if s.Windows.Resources == nil { s.Windows.Resources = &specs.WindowsResources{} @@ -97,6 +101,11 @@ func setCPU(s *Spec) { s.Linux.Resources.CPU = &specs.LinuxCPU{} } } +} + +//nolint:nolintlint,unused // not used on all platforms +func setCPUWindows(s *Spec) { + setResourcesWindows(s) if s.Windows != nil { if s.Windows.Resources.CPU == nil { s.Windows.Resources.CPU = &specs.WindowsCPUResources{} @@ -189,23 +198,23 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) results := make([]string, 0, len(defaults)) for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) + k, _, _ := strings.Cut(e, "=") results = append(results, e) - cache[parts[0]] = i + cache[k] = i } for _, value := range overrides { // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { + k, _, ok := strings.Cut(value, "=") + if !ok { + if i, exists := cache[k]; exists { results[i] = "" // Used to indicate it should be removed } continue } // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { + if i, exists := cache[k]; exists { results[i] = value } else { results = append(results, value) @@ -276,6 +285,14 @@ func WithHostname(name string) SpecOpts { } } +// WithDomainname sets the container's NIS domain name +func WithDomainname(name string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Domainname = name + return nil + } +} + // WithMounts appends mounts func WithMounts(mounts []specs.Mount) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { @@ -378,6 +395,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return fmt.Errorf("unknown image config media type %s", ic.MediaType) } + appendOSMounts(s, ociimage.OS) setProcess(s) if s.Linux != nil { defaults := config.Env @@ -589,7 +607,9 @@ func WithUser(userstr string) SpecOpts { // The `Username` field on the runtime spec is marked by Platform as only for Windows, and in this case it // *is* being set on a Windows host at least, but will be used as a temporary holding spot until the guest // can use the string to perform these same operations to grab the uid:gid inside. - if s.Windows != nil && s.Linux != nil { + // + // Mounts are not supported on Darwin, so using the same workaround. 
+ if (s.Windows != nil && s.Linux != nil) || runtime.GOOS == "darwin" { s.Process.User.Username = userstr return nil } @@ -1334,12 +1354,28 @@ func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { } } +func WithLinuxDeviceFollowSymlinks(path, permissions string) SpecOpts { + return withLinuxDevice(path, permissions, true) +} + // WithLinuxDevice adds the device specified by path to the spec func WithLinuxDevice(path, permissions string) SpecOpts { + return withLinuxDevice(path, permissions, false) +} + +func withLinuxDevice(path, permissions string, followSymlinks bool) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { setLinux(s) setResources(s) + if followSymlinks { + resolvedPath, err := filepath.EvalSymlinks(path) + if err != nil { + return err + } + path = resolvedPath + } + dev, err := DeviceFromPath(path) if err != nil { return err @@ -1404,3 +1440,184 @@ func WithDevShmSize(kb int64) SpecOpts { return ErrNoShmMount } } + +// WithWindowsDevice adds a device exposed to a Windows (WCOW or LCOW) Container +func WithWindowsDevice(idType, id string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if idType == "" { + return errors.New("missing idType") + } + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + s.Windows.Devices = append(s.Windows.Devices, specs.WindowsDevice{IDType: idType, ID: id}) + return nil + } +} + +// WithMemorySwap sets the container's swap in bytes +func WithMemorySwap(swap int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + if s.Linux.Resources.Memory == nil { + s.Linux.Resources.Memory = &specs.LinuxMemory{} + } + s.Linux.Resources.Memory.Swap = &swap + return nil + } +} + +// WithPidsLimit sets the container's pid limit or maximum +func WithPidsLimit(limit int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + if s.Linux.Resources.Pids == nil { + s.Linux.Resources.Pids = &specs.LinuxPids{} + } + s.Linux.Resources.Pids.Limit = limit + return nil + } +} + +// WithBlockIO sets the container's blkio parameters +func WithBlockIO(blockio *specs.LinuxBlockIO) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + s.Linux.Resources.BlockIO = blockio + return nil + } +} + +// WithCPUShares sets the container's cpu shares +func WithCPUShares(shares uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Shares = &shares + return nil + } +} + +// WithCPUs sets the container's cpus/cores for use by the container +func WithCPUs(cpus string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Cpus = cpus + return nil + } +} + +// WithCPUsMems sets the container's cpu mems for use by the container +func WithCPUsMems(mems string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Mems = mems + return nil + } +} + +// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period +func WithCPUCFS(quota int64, period uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Quota = "a + s.Linux.Resources.CPU.Period = 
&period + return nil + } +} + +// WithCPURT sets the container's realtime scheduling (RT) runtime and period. +func WithCPURT(runtime int64, period uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.RealtimeRuntime = &runtime + s.Linux.Resources.CPU.RealtimePeriod = &period + return nil + } +} + +// WithoutRunMount removes the `/run` inside the spec +func WithoutRunMount(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + return WithoutMounts("/run")(ctx, client, c, s) +} + +// WithRdt sets the container's RDT parameters +func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + s.Linux.IntelRdt = &specs.LinuxIntelRdt{ + ClosID: closID, + L3CacheSchema: l3CacheSchema, + MemBwSchema: memBwSchema, + } + return nil + } +} + +// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the +// `count` specified. +func WithWindowsCPUCount(count uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Count = &count + return nil + } +} + +// WithWindowsCPUShares sets the `Windows.Resources.CPU.Shares` section to the +// `shares` specified. +func WithWindowsCPUShares(shares uint16) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Shares = &shares + return nil + } +} + +// WithWindowsCPUMaximum sets the `Windows.Resources.CPU.Maximum` section to the +// `max` specified. +func WithWindowsCPUMaximum(max uint16) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Maximum = &max + return nil + } +} + +// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. +func WithWindowsIgnoreFlushesDuringBoot() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + s.Windows.IgnoreFlushesDuringBoot = true + return nil + } +} + +// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`. +func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + + s.Windows.Network.AllowUnqualifiedDNSQuery = true + return nil + } +} + +// WithWindowsNetworkNamespace sets the network namespace for a Windows container. 
+func WithWindowsNetworkNamespace(ns string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows == nil {
+			s.Windows = &specs.Windows{}
+		}
+		if s.Windows.Network == nil {
+			s.Windows.Network = &specs.WindowsNetwork{}
+		}
+		s.Windows.Network.NetworkNamespace = ns
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
index 34651d1fff..bccea766aa 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
@@ -60,67 +60,6 @@ func WithDevices(devicePath, containerPath, permissions string) SpecOpts {
 	}
 }
 
-// WithMemorySwap sets the container's swap in bytes
-func WithMemorySwap(swap int64) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setResources(s)
-		if s.Linux.Resources.Memory == nil {
-			s.Linux.Resources.Memory = &specs.LinuxMemory{}
-		}
-		s.Linux.Resources.Memory.Swap = &swap
-		return nil
-	}
-}
-
-// WithPidsLimit sets the container's pid limit or maximum
-func WithPidsLimit(limit int64) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setResources(s)
-		if s.Linux.Resources.Pids == nil {
-			s.Linux.Resources.Pids = &specs.LinuxPids{}
-		}
-		s.Linux.Resources.Pids.Limit = limit
-		return nil
-	}
-}
-
-// WithCPUShares sets the container's cpu shares
-func WithCPUShares(shares uint64) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setCPU(s)
-		s.Linux.Resources.CPU.Shares = &shares
-		return nil
-	}
-}
-
-// WithCPUs sets the container's cpus/cores for use by the container
-func WithCPUs(cpus string) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setCPU(s)
-		s.Linux.Resources.CPU.Cpus = cpus
-		return nil
-	}
-}
-
-// WithCPUsMems sets the container's cpu mems for use by the container
-func WithCPUsMems(mems string) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setCPU(s)
-		s.Linux.Resources.CPU.Mems = mems
-		return nil
-	}
-}
-
-// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period
-func WithCPUCFS(quota int64, period uint64) SpecOpts {
-	return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
-		setCPU(s)
-		s.Linux.Resources.CPU.Quota = &quota
-		s.Linux.Resources.CPU.Period = &period
-		return nil
-	}
-}
-
 // WithAllCurrentCapabilities propagates the effective capabilities of the caller process to the container process.
 // The capability set may differ from WithAllKnownCapabilities when running in a container. 
var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { @@ -137,23 +76,6 @@ var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *conta return WithCapabilities(caps)(ctx, client, c, s) } -// WithoutRunMount removes the `/run` inside the spec -func WithoutRunMount(ctx context.Context, client Client, c *containers.Container, s *Spec) error { - return WithoutMounts("/run")(ctx, client, c, s) -} - -// WithRdt sets the container's RDT parameters -func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - s.Linux.IntelRdt = &specs.LinuxIntelRdt{ - ClosID: closID, - L3CacheSchema: l3CacheSchema, - MemBwSchema: memBwSchema, - } - return nil - } -} - func escapeAndCombineArgs(args []string) string { panic("not supported") } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go index ad1faa46ac..b2f796f361 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. @@ -21,7 +20,6 @@ package oci import ( "context" - "errors" "github.com/containerd/containerd/containers" ) @@ -36,17 +34,3 @@ var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *con var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } - -// WithCPUShares sets the container's cpu shares -func WithCPUShares(shares uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return nil - } -} - -// WithRdt sets the container's RDT parameters -func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, _ *Spec) error { - return errors.New("RDT not supported") - } -} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go index a6165777fe..f4135285cc 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows /* Copyright The containerd Authors. 
@@ -51,13 +50,6 @@ func WithDevices(devicePath, containerPath, permissions string) SpecOpts { } } -// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period -func WithCPUCFS(quota int64, period uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return nil - } -} - func escapeAndCombineArgs(args []string) string { panic("not supported") } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index 602d40e4a9..7624daf112 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -21,51 +21,18 @@ import ( "errors" "strings" - "github.com/containerd/containerd/containers" - - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/sys/windows" + + "github.com/containerd/containerd/containers" ) -// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the -// `count` specified. -func WithWindowsCPUCount(count uint64) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows.Resources == nil { - s.Windows.Resources = &specs.WindowsResources{} - } - if s.Windows.Resources.CPU == nil { - s.Windows.Resources.CPU = &specs.WindowsCPUResources{} - } - s.Windows.Resources.CPU.Count = &count - return nil - } -} - -// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. -func WithWindowsIgnoreFlushesDuringBoot() SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - s.Windows.IgnoreFlushesDuringBoot = true - return nil - } -} - -// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`. -func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - if s.Windows.Network == nil { - s.Windows.Network = &specs.WindowsNetwork{} - } - - s.Windows.Network.AllowUnqualifiedDNSQuery = true - return nil +func escapeAndCombineArgs(args []string) string { + escaped := make([]string, len(args)) + for i, a := range args { + escaped[i] = windows.EscapeArg(a) } + return strings.Join(escaped, " ") } // WithProcessCommandLine replaces the command line on the generated spec @@ -89,24 +56,9 @@ func DeviceFromPath(path string) (*specs.LinuxDevice, error) { return nil, errors.New("device from path not supported on Windows") } -// WithWindowsNetworkNamespace sets the network namespace for a Windows container. -func WithWindowsNetworkNamespace(ns string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - if s.Windows.Network == nil { - s.Windows.Network = &specs.WindowsNetwork{} - } - s.Windows.Network.NetworkNamespace = ns +// WithDevices does nothing on Windows. 
+func WithDevices(devicePath, containerPath, permissions string) SpecOpts { + return func(ctx context.Context, client Client, container *containers.Container, spec *Spec) error { return nil } } - -func escapeAndCombineArgs(args []string) string { - escaped := make([]string, len(args)) - for i, a := range args { - escaped[i] = windows.EscapeArg(a) - } - return strings.Join(escaped, " ") -} diff --git a/vendor/github.com/containerd/containerd/oci/utils_unix.go b/vendor/github.com/containerd/containerd/oci/utils_unix.go index 306f098146..b3cb8a600d 100644 --- a/vendor/github.com/containerd/containerd/oci/utils_unix.go +++ b/vendor/github.com/containerd/containerd/oci/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/oss_fuzz.go b/vendor/github.com/containerd/containerd/oss_fuzz.go new file mode 100644 index 0000000000..8d1def4f09 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oss_fuzz.go @@ -0,0 +1,26 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "github.com/AdamKorcz/go-118-fuzz-build/testing" +) + +// To keep this package in go.mod. +var _ = testing.F{} diff --git a/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go index 833170338e..3c3c5beb67 100644 --- a/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go b/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go index 26212573dc..63fa104fb4 100644 --- a/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go +++ b/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go @@ -80,15 +80,14 @@ func ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) { scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() - pair := strings.SplitN(line, ":", 2) - if len(pair) != 2 { + k, v, ok := strings.Cut(line, ":") + if !ok { continue } - k := strings.TrimSpace(pair[0]) - v := strings.TrimSpace(pair[1]) + k = strings.TrimSpace(k) switch k { case "CapInh", "CapPrm", "CapEff", "CapBnd", "CapAmb": - ui64, err := strconv.ParseUint(v, 16, 64) + ui64, err := strconv.ParseUint(strings.TrimSpace(v), 16, 64) if err != nil { return nil, fmt.Errorf("failed to parse line %q", line) } diff --git a/vendor/github.com/containerd/containerd/pkg/cleanup/context.go b/vendor/github.com/containerd/containerd/pkg/cleanup/context.go new file mode 100644 index 0000000000..62741c4ca0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/cleanup/context.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package providing utilies to help cleanup +package cleanup + +import ( + "context" + "time" +) + +type clearCancel struct { + context.Context +} + +func (cc clearCancel) Deadline() (deadline time.Time, ok bool) { + return +} + +func (cc clearCancel) Done() <-chan struct{} { + return nil +} + +func (cc clearCancel) Err() error { + return nil +} + +// Background creates a new context which clears out the parent errors +func Background(ctx context.Context) context.Context { + return clearCancel{ctx} +} + +// Do runs the provided function with a context in which the +// errors are cleared out and will timeout after 10 seconds. +func Do(ctx context.Context, do func(context.Context)) { + ctx, cancel := context.WithTimeout(clearCancel{ctx}, 10*time.Second) + do(ctx) + cancel() +} diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go index b4304ffbf1..798566bc5d 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/pkg/epoch/context.go similarity index 53% rename from vendor/github.com/containerd/containerd/sys/epoll.go rename to vendor/github.com/containerd/containerd/pkg/epoch/context.go index 73a57013ff..fd16f95196 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/pkg/epoch/context.go @@ -1,6 +1,3 @@ -//go:build linux -// +build linux - /* Copyright The containerd Authors. @@ -17,18 +14,28 @@ limitations under the License. */ -package sys +package epoch -import "golang.org/x/sys/unix" +import ( + "context" + "time" +) -// EpollCreate1 is an alias for unix.EpollCreate1 -// Deprecated: use golang.org/x/sys/unix.EpollCreate1 -var EpollCreate1 = unix.EpollCreate1 +type ( + epochKey struct{} +) -// EpollCtl is an alias for unix.EpollCtl -// Deprecated: use golang.org/x/sys/unix.EpollCtl -var EpollCtl = unix.EpollCtl +// WithSourceDateEpoch associates the context with the epoch. +func WithSourceDateEpoch(ctx context.Context, tm *time.Time) context.Context { + return context.WithValue(ctx, epochKey{}, tm) +} -// EpollWait is an alias for unix.EpollWait -// Deprecated: use golang.org/x/sys/unix.EpollWait -var EpollWait = unix.EpollWait +// FromContext returns the epoch associated with the context. +// FromContext does not fall back to read the SOURCE_DATE_EPOCH env var. 
+func FromContext(ctx context.Context) *time.Time { + v := ctx.Value(epochKey{}) + if v == nil { + return nil + } + return v.(*time.Time) +} diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go new file mode 100644 index 0000000000..124e8edb50 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go @@ -0,0 +1,69 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package epoch provides SOURCE_DATE_EPOCH utilities. +package epoch + +import ( + "fmt" + "os" + "strconv" + "time" + + "github.com/sirupsen/logrus" +) + +// SourceDateEpochEnv is the SOURCE_DATE_EPOCH env var. +// See https://reproducible-builds.org/docs/source-date-epoch/ +const SourceDateEpochEnv = "SOURCE_DATE_EPOCH" + +// SourceDateEpoch returns the SOURCE_DATE_EPOCH env var as *time.Time. +// If the env var is not set, SourceDateEpoch returns nil without an error. +func SourceDateEpoch() (*time.Time, error) { + v, ok := os.LookupEnv(SourceDateEpochEnv) + if !ok || v == "" { + return nil, nil // not an error + } + i64, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid %s value %q: %w", SourceDateEpochEnv, v, err) + } + unix := time.Unix(i64, 0).UTC() + return &unix, nil +} + +// SourceDateEpochOrNow returns the SOURCE_DATE_EPOCH time if available, +// otherwise returns the current time. +func SourceDateEpochOrNow() time.Time { + epoch, err := SourceDateEpoch() + if err != nil { + logrus.WithError(err).Warnf("Invalid %s", SourceDateEpochEnv) + } + if epoch != nil { + return *epoch + } + return time.Now().UTC() +} + +// SetSourceDateEpoch sets the SOURCE_DATE_EPOCH env var. +func SetSourceDateEpoch(tm time.Time) { + os.Setenv(SourceDateEpochEnv, fmt.Sprintf("%d", tm.Unix())) +} + +// UnsetSourceDateEpoch unsets the SOURCE_DATE_EPOCH env var. +func UnsetSourceDateEpoch() { + os.Unsetenv(SourceDateEpochEnv) +} diff --git a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go index a224dc6f21..8a30127be2 100644 --- a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go +++ b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go @@ -1,397 +1,177 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// To regenerate api.pb.go run `make protos` + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto package runtimeoptions_v1 import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Options struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // TypeUrl specifies the type of the content inside the config file. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // ConfigPath specifies the filesystem location of the config file // used by the runtime. - ConfigPath string `protobuf:"bytes,2,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + ConfigPath string `protobuf:"bytes,2,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` + // Blob specifies an in-memory TOML blob passed from containerd's configuration section + // for this runtime. This will be used if config_path is not specified. + ConfigBody []byte `protobuf:"bytes,3,opt,name=config_body,json=configBody,proto3" json:"config_body,omitempty"` +} + +func (x *Options) Reset() { + *x = Options{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Options) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Options) Reset() { *m = Options{} } func (*Options) ProtoMessage() {} + +func (x *Options) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Options.ProtoReflect.Descriptor instead. 
func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_7700dd27e3487aa6, []int{0} -} -func (m *Options) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Options.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Options) XXX_Merge(src proto.Message) { - xxx_messageInfo_Options.Merge(m, src) -} -func (m *Options) XXX_Size() int { - return m.Size() -} -func (m *Options) XXX_DiscardUnknown() { - xxx_messageInfo_Options.DiscardUnknown(m) + return file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Options proto.InternalMessageInfo - -func (m *Options) GetTypeUrl() string { - if m != nil { - return m.TypeUrl +func (x *Options) GetTypeUrl() string { + if x != nil { + return x.TypeUrl } return "" } -func (m *Options) GetConfigPath() string { - if m != nil { - return m.ConfigPath +func (x *Options) GetConfigPath() string { + if x != nil { + return x.ConfigPath } return "" } -func init() { - proto.RegisterType((*Options)(nil), "runtimeoptions.v1.Options") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto", fileDescriptor_7700dd27e3487aa6) -} - -var fileDescriptor_7700dd27e3487aa6 = []byte{ - // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x16, 0x64, 0xa7, 0xeb, 0x17, 0x95, 0xe6, 0x95, 0x64, 0xe6, 0xa6, 0xe6, 0x17, - 0x94, 0x64, 0xe6, 0xe7, 0x15, 0xeb, 0x97, 0x19, 0xea, 0x27, 0x16, 0x64, 0xea, 0x15, 0x14, 0xe5, - 0x97, 0xe4, 0x0b, 0x09, 0xa2, 0x4a, 0xea, 0x95, 0x19, 0x4a, 0xe9, 0x22, 0x19, 0x9a, 0x9e, 0x9f, - 0x9e, 0xaf, 0x0f, 0x56, 0x99, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x04, 0x25, - 0x57, 0x2e, 0x76, 0x7f, 0x88, 0x66, 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, - 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, - 0x9e, 0x8b, 0x3b, 0x39, 0x3f, 0x2f, 0x2d, 0x33, 0x3d, 0xbe, 0x20, 0xb1, 0x24, 0x43, 0x82, 0x09, - 0x2c, 0xcb, 0x05, 0x11, 0x0a, 0x48, 0x2c, 0xc9, 0x70, 0x4a, 0x3b, 0xf1, 0x50, 0x8e, 0xf1, 0xc6, - 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, - 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1e, 0xe4, 0x79, 0xd4, 0x1a, 0x55, 0x24, - 0xbe, 0xcc, 0x30, 0x89, 0x0d, 0xec, 0x6a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0x3c, - 0x3e, 0x79, 0x3b, 0x01, 0x00, 0x00, -} - -func (m *Options) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Options) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConfigPath) > 0 { - i -= len(m.ConfigPath) - copy(dAtA[i:], m.ConfigPath) - i = encodeVarintApi(dAtA, i, uint64(len(m.ConfigPath))) - i-- - dAtA[i] = 0x12 - } - if len(m.TypeUrl) > 0 { - i -= len(m.TypeUrl) - copy(dAtA[i:], 
m.TypeUrl) - i = encodeVarintApi(dAtA, i, uint64(len(m.TypeUrl))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Options) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TypeUrl) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ConfigPath) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Options) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Options{`, - `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, - `ConfigPath:` + fmt.Sprintf("%v", this.ConfigPath) + `,`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Options) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Options: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TypeUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConfigPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF 
+func (x *Options) GetConfigBody() []byte { + if x != nil { + return x.ConfigBody } return nil } -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x31, 0x22, 0x66, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x4a, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") + 
file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData = file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc ) + +func file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData) + }) + return file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData +} + +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes = []interface{}{ + (*Options)(nil), // 0: runtimeoptions.v1.Options +} +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_init() } +func file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_init() { + if File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Options); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto = out.File + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc = nil + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes = nil + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto index c771ea10ee..d0ab0e2f95 100644 --- a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto +++ b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto @@ -3,17 +3,6 @@ syntax = "proto3"; package runtimeoptions.v1; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option (gogoproto.goproto_getters_all) = true; -option 
(gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_unrecognized_all) = false; - - option go_package = "github.com/containerd/containerd/pkg/runtimeoptions/v1;runtimeoptions_v1"; message Options { @@ -22,4 +11,7 @@ message Options { // ConfigPath specifies the filesystem location of the config file // used by the runtime. string config_path = 2; + // Blob specifies an in-memory TOML blob passed from containerd's configuration section + // for this runtime. This will be used if config_path is not specified. + bytes config_body = 3; } diff --git a/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go index 4458c1c702..41c2ceacbb 100644 --- a/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go index bc1af75abb..12ffac1457 100644 --- a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go +++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go @@ -37,6 +37,11 @@ type Service interface { // the shutdown channel is closed. A callback error will propagate to the // context error RegisterCallback(func(context.Context) error) + // Done returns a channel that's closed when all shutdown callbacks are invoked. + Done() <-chan struct{} + // Err returns nil if Done is not yet closed. + // If Done is closed, Err returns first failed callback error or ErrShutdown. + Err() error } // WithShutdown returns a context which is similar to a cancel context, but @@ -99,6 +104,7 @@ func (s *shutdownService) Err() error { defer s.mu.Unlock() return s.err } + func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go b/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go new file mode 100644 index 0000000000..15fc6c3c1b --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package streaming + +import ( + "context" + + "github.com/containerd/typeurl/v2" +) + +type StreamManager interface { + StreamGetter + Register(context.Context, string, Stream) error +} + +type StreamGetter interface { + Get(context.Context, string) (Stream, error) +} + +type StreamCreator interface { + Create(context.Context, string) (Stream, error) +} + +type Stream interface { + // Send sends the object on the stream + Send(typeurl.Any) error + + // Recv receives an object on the stream + Recv() (typeurl.Any, error) + + // Close closes the stream + Close() error +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go b/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go new file mode 100644 index 0000000000..50dba0b37c --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go @@ -0,0 +1,122 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + "errors" + "io" + + "google.golang.org/protobuf/types/known/anypb" + + transferapi "github.com/containerd/containerd/api/services/transfer/v1" + transfertypes "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/containerd/pkg/transfer" + tstreaming "github.com/containerd/containerd/pkg/transfer/streaming" + "github.com/containerd/typeurl/v2" +) + +type proxyTransferrer struct { + client transferapi.TransferClient + streamCreator streaming.StreamCreator +} + +// NewTransferrer returns a new transferrer which communicates over a GRPC +// connection using the containerd transfer API +func NewTransferrer(client transferapi.TransferClient, sc streaming.StreamCreator) transfer.Transferrer { + return &proxyTransferrer{ + client: client, + streamCreator: sc, + } +} + +func (p *proxyTransferrer) Transfer(ctx context.Context, src interface{}, dst interface{}, opts ...transfer.Opt) error { + o := &transfer.Config{} + for _, opt := range opts { + opt(o) + } + apiOpts := &transferapi.TransferOptions{} + if o.Progress != nil { + sid := tstreaming.GenerateID("progress") + stream, err := p.streamCreator.Create(ctx, sid) + if err != nil { + return err + } + apiOpts.ProgressStream = sid + go func() { + for { + a, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) { + log.G(ctx).WithError(err).Error("progress stream failed to recv") + } + return + } + i, err := typeurl.UnmarshalAny(a) + if err != nil { + log.G(ctx).WithError(err).Warnf("failed to unmarshal progress object: %v", a.GetTypeUrl()) + } + switch v := i.(type) { + case *transfertypes.Progress: + o.Progress(transfer.Progress{ + Event: v.Event, + Name: v.Name, + Parents: v.Parents, + Progress: v.Progress, + Total: v.Total, + }) + default: + log.G(ctx).Warnf("unhandled progress object %T: %v", i, a.GetTypeUrl()) + } + } + }() + } + asrc, err := p.marshalAny(ctx, src) + if err != nil { + return err + } + adst, err := 
p.marshalAny(ctx, dst) + if err != nil { + return err + } + req := &transferapi.TransferRequest{ + Source: &anypb.Any{ + TypeUrl: asrc.GetTypeUrl(), + Value: asrc.GetValue(), + }, + Destination: &anypb.Any{ + TypeUrl: adst.GetTypeUrl(), + Value: adst.GetValue(), + }, + Options: apiOpts, + } + _, err = p.client.Transfer(ctx, req) + return err +} +func (p *proxyTransferrer) marshalAny(ctx context.Context, i interface{}) (typeurl.Any, error) { + switch m := i.(type) { + case streamMarshaler: + return m.MarshalAny(ctx, p.streamCreator) + } + return typeurl.MarshalAny(i) +} + +type streamMarshaler interface { + MarshalAny(context.Context, streaming.StreamCreator) (typeurl.Any, error) +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go new file mode 100644 index 0000000000..c859cf42be --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go @@ -0,0 +1,210 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package streaming + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "time" + + transferapi "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/typeurl/v2" +) + +const maxRead = 32 * 1024 +const windowSize = 2 * maxRead + +var bufPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, maxRead) + return &buffer + }, +} + +func SendStream(ctx context.Context, r io.Reader, stream streaming.Stream) { + window := make(chan int32) + go func() { + defer close(window) + for { + select { + case <-ctx.Done(): + return + default: + } + + any, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { + log.G(ctx).WithError(err).Error("send stream ended without EOF") + } + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + log.G(ctx).WithError(err).Error("failed to unmarshal stream object") + continue + } + switch v := i.(type) { + case *transferapi.WindowUpdate: + select { + case <-ctx.Done(): + return + case window <- v.Update: + } + default: + log.G(ctx).Errorf("unexpected stream object of type %T", i) + } + } + }() + go func() { + defer stream.Close() + + buf := bufPool.Get().(*[]byte) + defer bufPool.Put(buf) + + var remaining int32 + + for { + if remaining > 0 { + // Don't wait for window update since there are remaining + select { + case <-ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + return + case update := <-window: + remaining += update + default: + } + } else { + // Block until window updated + select { + case <-ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + return + case update := <-window: + remaining = update + } + } + var max int32 = maxRead + if max > 
remaining { + max = remaining + } + b := (*buf)[:max] + n, err := r.Read(b) + if err != nil { + if !errors.Is(err, io.EOF) { + log.G(ctx).WithError(err).Errorf("failed to read stream source") + // TODO: Send error message on stream before close to allow remote side to return error + } + return + } + remaining = remaining - int32(n) + + data := &transferapi.Data{ + Data: b[:n], + } + any, err := typeurl.MarshalAny(data) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to marshal data for send") + // TODO: Send error message on stream before close to allow remote side to return error + return + } + if err := stream.Send(any); err != nil { + log.G(ctx).WithError(err).Errorf("send failed") + return + } + } + }() +} + +func ReceiveStream(ctx context.Context, stream streaming.Stream) io.Reader { + r, w := io.Pipe() + go func() { + defer stream.Close() + var window int32 + for { + var werr error + if window < windowSize { + update := &transferapi.WindowUpdate{ + Update: windowSize, + } + any, err := typeurl.MarshalAny(update) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to marshal window update: %w", err)) + return + } + // check window update error after recv, stream may be complete + if werr = stream.Send(any); werr == nil { + window += windowSize + } else if errors.Is(werr, io.EOF) { + // TODO: Why does send return EOF here + werr = nil + } + } + any, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + err = nil + } else { + err = fmt.Errorf("received failed: %w", err) + } + w.CloseWithError(err) + return + } else if werr != nil { + // Try receive before erroring out + w.CloseWithError(fmt.Errorf("failed to send window update: %w", werr)) + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err)) + return + } + switch v := i.(type) { + case *transferapi.Data: + n, err := w.Write(v.Data) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err)) + // Close will error out sender + return + } + window = window - int32(n) + // TODO: Handle error case + default: + log.G(ctx).Warnf("Ignoring unknown stream object of type %T", i) + continue + } + } + + }() + + return r +} + +func GenerateID(prefix string) string { + t := time.Now() + var b [3]byte + rand.Read(b[:]) + return fmt.Sprintf("%s-%d-%s", prefix, t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go new file mode 100644 index 0000000000..94db9d6a81 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go @@ -0,0 +1,130 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package streaming + +import ( + "context" + "errors" + "io" + "sync/atomic" + + transferapi "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/typeurl/v2" +) + +func WriteByteStream(ctx context.Context, stream streaming.Stream) io.WriteCloser { + wbs := &writeByteStream{ + ctx: ctx, + stream: stream, + updated: make(chan struct{}, 1), + } + go func() { + for { + select { + case <-ctx.Done(): + return + default: + } + + any, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { + log.G(ctx).WithError(err).Error("send byte stream ended without EOF") + } + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + log.G(ctx).WithError(err).Error("failed to unmarshal stream object") + continue + } + switch v := i.(type) { + case *transferapi.WindowUpdate: + atomic.AddInt32(&wbs.remaining, v.Update) + select { + case <-ctx.Done(): + return + case wbs.updated <- struct{}{}: + default: + // Don't block if no writes are waiting + } + default: + log.G(ctx).Errorf("unexpected stream object of type %T", i) + } + } + }() + + return wbs +} + +type writeByteStream struct { + ctx context.Context + stream streaming.Stream + remaining int32 + updated chan struct{} +} + +func (wbs *writeByteStream) Write(p []byte) (n int, err error) { + for len(p) > 0 { + remaining := atomic.LoadInt32(&wbs.remaining) + if remaining == 0 { + // Don't wait for window update since there are remaining + select { + case <-wbs.ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + err = io.ErrShortWrite + return + case <-wbs.updated: + continue + } + } + var max int32 = maxRead + if max > int32(len(p)) { + max = int32(len(p)) + } + if max > remaining { + max = remaining + } + // TODO: continue + //remaining = remaining - int32(n) + + data := &transferapi.Data{ + Data: p[:max], + } + var any typeurl.Any + any, err = typeurl.MarshalAny(data) + if err != nil { + log.G(wbs.ctx).WithError(err).Errorf("failed to marshal data for send") + // TODO: Send error message on stream before close to allow remote side to return error + return + } + if err = wbs.stream.Send(any); err != nil { + log.G(wbs.ctx).WithError(err).Errorf("send failed") + return + } + n += int(max) + p = p[max:] + atomic.AddInt32(&wbs.remaining, -1*max) + } + return +} + +func (wbs *writeByteStream) Close() error { + return wbs.stream.Close() +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go b/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go new file mode 100644 index 0000000000..01df8c3d39 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go @@ -0,0 +1,136 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transfer + +import ( + "context" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" +) + +type Transferrer interface { + Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error +} + +type ImageResolver interface { + Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error) +} + +type ImageFetcher interface { + ImageResolver + + Fetcher(ctx context.Context, ref string) (Fetcher, error) +} + +type ImagePusher interface { + Pusher(context.Context, ocispec.Descriptor) (Pusher, error) +} + +type Fetcher interface { + Fetch(context.Context, ocispec.Descriptor) (io.ReadCloser, error) +} + +type Pusher interface { + Push(context.Context, ocispec.Descriptor) (content.Writer, error) +} + +// ImageFilterer is used to filter out child objects of an image +type ImageFilterer interface { + ImageFilter(images.HandlerFunc, content.Store) images.HandlerFunc +} + +// ImageStorer is a type which is capable of storing images for +// the provided descriptor. The descriptor may be any type of manifest +// including an index with multiple image references. +type ImageStorer interface { + Store(context.Context, ocispec.Descriptor, images.Store) ([]images.Image, error) +} + +// ImageGetter is type which returns an image from an image store +type ImageGetter interface { + Get(context.Context, images.Store) (images.Image, error) +} + +// ImageLookup is a type which returns images from an image store +// based on names or prefixes +type ImageLookup interface { + Lookup(context.Context, images.Store) ([]images.Image, error) +} + +// ImageExporter exports images to a writer +type ImageExporter interface { + Export(context.Context, content.Store, []images.Image) error +} + +// ImageImporter imports an image into a content store +type ImageImporter interface { + Import(context.Context, content.Store) (ocispec.Descriptor, error) +} + +// ImageImportStreamer returns an import streamer based on OCI or +// Docker image tar archives. The stream should be a raw tar stream +// and without compression. +type ImageImportStreamer interface { + ImportStream(context.Context) (io.Reader, string, error) +} + +type ImageExportStreamer interface { + ExportStream(context.Context) (io.WriteCloser, string, error) +} + +type ImageUnpacker interface { + UnpackPlatforms() []UnpackConfiguration +} + +// UnpackConfiguration specifies the platform and snapshotter to use for resolving +// the unpack Platform, if snapshotter is not specified the platform default will +// be used. +type UnpackConfiguration struct { + Platform ocispec.Platform + Snapshotter string +} + +type ProgressFunc func(Progress) + +type Config struct { + Progress ProgressFunc +} + +type Opt func(*Config) + +func WithProgress(f ProgressFunc) Opt { + return func(opts *Config) { + opts.Progress = f + } +} + +// Progress is used to represent a particular progress event or incremental +// update for the provided named object. The parents represent the names of +// the objects which initiated the progress for the provided named object. +// The name and what object it represents is determined by the implementation. +type Progress struct { + Event string + Name string + Parents []string + Progress int64 + Total int64 + // Descriptor? 
+} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index f05ab7aa9f..e1c1b6cf9c 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,6 +17,7 @@ package ttrpcutil import ( + "context" "errors" "fmt" "sync" @@ -42,7 +43,9 @@ type Client struct { // NewClient returns a new containerd TTRPC client that is connected to the containerd instance provided by address func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { - conn, err := dialer.Dialer(address, ttrpcDialTimeout) + ctx, cancel := context.WithTimeout(context.Background(), ttrpcDialTimeout) + defer cancel() + conn, err := dialer.ContextDialer(ctx, address) if err != nil { return nil, fmt.Errorf("failed to connect: %w", err) } diff --git a/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go b/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go new file mode 100644 index 0000000000..b160da6521 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go @@ -0,0 +1,537 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package unpack + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/cleanup" + "github.com/containerd/containerd/pkg/kmutex" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/tracing" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const ( + labelSnapshotRef = "containerd.io/snapshot.ref" + unpackSpanPrefix = "pkg.unpack.unpacker" +) + +// Result returns information about the unpacks which were completed. +type Result struct { + Unpacks int +} + +type unpackerConfig struct { + platforms []*Platform + + content content.Store + + limiter *semaphore.Weighted + duplicationSuppressor kmutex.KeyedLocker +} + +// Platform represents a platform-specific unpack configuration which includes +// the platform matcher as well as snapshotter and applier. 
+type Platform struct { + Platform platforms.Matcher + + SnapshotterKey string + Snapshotter snapshots.Snapshotter + SnapshotOpts []snapshots.Opt + + Applier diff.Applier + ApplyOpts []diff.ApplyOpt +} + +type UnpackerOpt func(*unpackerConfig) error + +func WithUnpackPlatform(u Platform) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + if u.Platform == nil { + u.Platform = platforms.All + } + if u.Snapshotter == nil { + return fmt.Errorf("snapshotter must be provided to unpack") + } + if u.SnapshotterKey == "" { + if s, ok := u.Snapshotter.(fmt.Stringer); ok { + u.SnapshotterKey = s.String() + } else { + u.SnapshotterKey = "unknown" + } + } + if u.Applier == nil { + return fmt.Errorf("applier must be provided to unpack") + } + + c.platforms = append(c.platforms, &u) + + return nil + }) +} + +func WithLimiter(l *semaphore.Weighted) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + c.limiter = l + return nil + }) +} + +func WithDuplicationSuppressor(d kmutex.KeyedLocker) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + c.duplicationSuppressor = d + return nil + }) +} + +// Unpacker unpacks images by hooking into the image handler process. +// Unpacks happen in the backgrounds and waited on to complete. +type Unpacker struct { + unpackerConfig + + unpacks int32 + ctx context.Context + eg *errgroup.Group +} + +// NewUnpacker creates a new instance of the unpacker which can be used to wrap an +// image handler and unpack in parallel to handling. The unpacker will handle +// calling the block handlers when they are needed by the unpack process. +func NewUnpacker(ctx context.Context, cs content.Store, opts ...UnpackerOpt) (*Unpacker, error) { + eg, ctx := errgroup.WithContext(ctx) + + u := &Unpacker{ + unpackerConfig: unpackerConfig{ + content: cs, + duplicationSuppressor: kmutex.NewNoop(), + }, + ctx: ctx, + eg: eg, + } + for _, opt := range opts { + if err := opt(&u.unpackerConfig); err != nil { + return nil, err + } + } + if len(u.platforms) == 0 { + return nil, fmt.Errorf("no unpack platforms defined: %w", errdefs.ErrInvalidArgument) + } + return u, nil +} + +// Unpack wraps an image handler to filter out blob handling and scheduling them +// during the unpack process. When an image config is encountered, the unpack +// process will be started in a goroutine. 
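// A rough usage sketch (editorial illustration, not part of the vendored
// upstream file; the containerd client value `c` and the fetch handler
// `handler` are assumed):
//
//	u, _ := unpack.NewUnpacker(ctx, c.ContentStore(),
//		unpack.WithUnpackPlatform(unpack.Platform{
//			Platform:       platforms.Default(),
//			SnapshotterKey: "overlayfs", // assumed snapshotter name
//			Snapshotter:    c.SnapshotService("overlayfs"),
//			Applier:        c.DiffService(),
//		}))
//	wrapped := u.Unpack(handler) // layers are extracted while blobs are fetched
//	// ... dispatch `wrapped` over the image's descriptor tree ...
//	res, _ := u.Wait() // res.Unpacks counts completed unpack operations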
+func (u *Unpacker) Unpack(h images.Handler) images.Handler { + var ( + lock sync.Mutex + layers = map[digest.Digest][]ocispec.Descriptor{} + ) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "UnpackHandler")) + defer span.End() + span.SetAttributes( + tracing.Attribute("descriptor.media.type", desc.MediaType), + tracing.Attribute("descriptor.digest", desc.Digest.String())) + unlock, err := u.lockBlobDescriptor(ctx, desc) + if err != nil { + return nil, err + } + children, err := h.Handle(ctx, desc) + unlock() + if err != nil { + return children, err + } + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + var nonLayers []ocispec.Descriptor + var manifestLayers []ocispec.Descriptor + // Split layers from non-layers, layers will be handled after + // the config + for i, child := range children { + span.SetAttributes( + tracing.Attribute("descriptor.child."+strconv.Itoa(i), []string{child.MediaType, child.Digest.String()}), + ) + if images.IsLayerType(child.MediaType) { + manifestLayers = append(manifestLayers, child) + } else { + nonLayers = append(nonLayers, child) + } + } + + lock.Lock() + for _, nl := range nonLayers { + layers[nl.Digest] = manifestLayers + } + lock.Unlock() + + children = nonLayers + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + lock.Lock() + l := layers[desc.Digest] + lock.Unlock() + if len(l) > 0 { + u.eg.Go(func() error { + return u.unpack(h, desc, l) + }) + } + } + return children, nil + }) +} + +// Wait waits for any ongoing unpack processes to complete then will return +// the result. +func (u *Unpacker) Wait() (Result, error) { + if err := u.eg.Wait(); err != nil { + return Result{}, err + } + return Result{ + Unpacks: int(u.unpacks), + }, nil +} + +func (u *Unpacker) unpack( + h images.Handler, + config ocispec.Descriptor, + layers []ocispec.Descriptor, +) error { + ctx := u.ctx + ctx, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpack")) + defer layerSpan.End() + p, err := content.ReadBlob(ctx, u.content, config) + if err != nil { + return err + } + + var i ocispec.Image + if err := json.Unmarshal(p, &i); err != nil { + return fmt.Errorf("unmarshal image config: %w", err) + } + diffIDs := i.RootFS.DiffIDs + if len(layers) != len(diffIDs) { + return fmt.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + } + + // TODO: Support multiple unpacks rather than just first match + var unpack *Platform + + imgPlatform := platforms.Normalize(ocispec.Platform{OS: i.OS, Architecture: i.Architecture}) + for _, up := range u.platforms { + if up.Platform.Match(imgPlatform) { + unpack = up + break + } + } + + if unpack == nil { + return fmt.Errorf("unpacker does not support platform %s for image %s", imgPlatform, config.Digest) + } + + atomic.AddInt32(&u.unpacks, 1) + + var ( + sn = unpack.Snapshotter + a = unpack.Applier + cs = u.content + + chain []digest.Digest + + fetchOffset int + fetchC []chan struct{} + fetchErr chan error + ) + + // If there is an early return, ensure any ongoing + // fetches get their context cancelled + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + doUnpackFn := func(i int, desc ocispec.Descriptor) error { + parent := identity.ChainID(chain) + chain = append(chain, diffIDs[i]) + chainID := identity.ChainID(chain).String() + + unlock, err := u.lockSnChainID(ctx, 
chainID, unpack.SnapshotterKey) + if err != nil { + return err + } + defer unlock() + + if _, err := sn.Stat(ctx, chainID); err == nil { + // no need to handle + return nil + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) + } + + // inherits annotations which are provided as snapshot labels. + snapshotLabels := snapshots.FilterInheritedLabels(desc.Annotations) + if snapshotLabels == nil { + snapshotLabels = make(map[string]string) + } + snapshotLabels[labelSnapshotRef] = chainID + + var ( + key string + mounts []mount.Mount + opts = append(unpack.SnapshotOpts, snapshots.WithLabels(snapshotLabels)) + ) + + for try := 1; try <= 3; try++ { + // Prepare snapshot with from parent, label as root + key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID) + mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) + if err != nil { + if errdefs.IsAlreadyExists(err) { + if _, err := sn.Stat(ctx, chainID); err != nil { + if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) + } + // Try again, this should be rare, log it + log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") + } else { + // no need to handle, snapshot now found with chain id + return nil + } + } else { + return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) + } + } else { + break + } + } + if err != nil { + return fmt.Errorf("unable to prepare extraction snapshot: %w", err) + } + + // Abort the snapshot if commit does not happen + abort := func(ctx context.Context) { + if err := sn.Remove(ctx, key); err != nil { + log.G(ctx).WithError(err).Errorf("failed to cleanup %q", key) + } + } + + if fetchErr == nil { + fetchErr = make(chan error, 1) + fetchOffset = i + fetchC = make([]chan struct{}, len(layers)-fetchOffset) + for i := range fetchC { + fetchC[i] = make(chan struct{}) + } + + go func(i int) { + err := u.fetch(ctx, h, layers[i:], fetchC) + if err != nil { + fetchErr <- err + } + close(fetchErr) + }(i) + } + + select { + case <-ctx.Done(): + cleanup.Do(ctx, abort) + return ctx.Err() + case err := <-fetchErr: + if err != nil { + cleanup.Do(ctx, abort) + return err + } + case <-fetchC[i-fetchOffset]: + } + + diff, err := a.Apply(ctx, desc, mounts, unpack.ApplyOpts...) + if err != nil { + cleanup.Do(ctx, abort) + return fmt.Errorf("failed to extract layer %s: %w", diffIDs[i], err) + } + if diff.Digest != diffIDs[i] { + cleanup.Do(ctx, abort) + return fmt.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) + } + + if err = sn.Commit(ctx, chainID, key, opts...); err != nil { + cleanup.Do(ctx, abort) + if errdefs.IsAlreadyExists(err) { + return nil + } + return fmt.Errorf("failed to commit snapshot %s: %w", key, err) + } + + // Set the uncompressed label after the uncompressed + // digest has been verified through apply. 
+ cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + labels.LabelUncompressed: diff.Digest.String(), + }, + } + if _, err := cs.Update(ctx, cinfo, "labels."+labels.LabelUncompressed); err != nil { + return err + } + return nil + } + + for i, desc := range layers { + _, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpackLayer")) + layerSpan.SetAttributes( + tracing.Attribute("layer.media.type", desc.MediaType), + tracing.Attribute("layer.media.size", desc.Size), + tracing.Attribute("layer.media.digest", desc.Digest.String()), + ) + if err := doUnpackFn(i, desc); err != nil { + layerSpan.SetStatus(err) + layerSpan.End() + return err + } + layerSpan.End() + } + + chainID := identity.ChainID(chain).String() + cinfo := content.Info{ + Digest: config.Digest, + Labels: map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", unpack.SnapshotterKey): chainID, + }, + } + _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", unpack.SnapshotterKey)) + if err != nil { + return err + } + log.G(ctx).WithFields(log.Fields{ + "config": config.Digest, + "chainID": chainID, + }).Debug("image unpacked") + + return nil +} + +func (u *Unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec.Descriptor, done []chan struct{}) error { + eg, ctx2 := errgroup.WithContext(ctx) + for i, desc := range layers { + ctx2, layerSpan := tracing.StartSpan(ctx2, tracing.Name(unpackSpanPrefix, "fetchLayer")) + layerSpan.SetAttributes( + tracing.Attribute("layer.media.type", desc.MediaType), + tracing.Attribute("layer.media.size", desc.Size), + tracing.Attribute("layer.media.digest", desc.Digest.String()), + ) + desc := desc + i := i + if err := u.acquire(ctx); err != nil { + return err + } + + eg.Go(func() error { + unlock, err := u.lockBlobDescriptor(ctx2, desc) + if err != nil { + u.release() + return err + } + + _, err = h.Handle(ctx2, desc) + + unlock() + u.release() + + if err != nil && !errors.Is(err, images.ErrSkipDesc) { + return err + } + close(done[i]) + + return nil + }) + layerSpan.End() + } + + return eg.Wait() +} + +func (u *Unpacker) acquire(ctx context.Context) error { + if u.limiter == nil { + return nil + } + return u.limiter.Acquire(ctx, 1) +} + +func (u *Unpacker) release() { + if u.limiter == nil { + return + } + u.limiter.Release(1) +} + +func (u *Unpacker) lockSnChainID(ctx context.Context, chainID, snapshotter string) (func(), error) { + key := u.makeChainIDKeyWithSnapshotter(chainID, snapshotter) + + if err := u.duplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.duplicationSuppressor.Unlock(key) + }, nil +} + +func (u *Unpacker) lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { + key := u.makeBlobDescriptorKey(desc) + + if err := u.duplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.duplicationSuppressor.Unlock(key) + }, nil +} + +func (u *Unpacker) makeChainIDKeyWithSnapshotter(chainID, snapshotter string) string { + return fmt.Sprintf("sn://%s/%v", snapshotter, chainID) +} + +func (u *Unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { + return fmt.Sprintf("blob://%v", desc.Digest) +} + +func uniquePart() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git 
a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index 4f8d7dd2d5..c67f773d0a 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go index 51fb62ea71..fa5f19c427 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go index e249fe48d3..72355ca85f 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go new file mode 100644 index 0000000000..d3fe89e076 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index 49690f1b3e..44acc47eb3 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !darwin -// +build !windows,!darwin +//go:build !windows && !darwin && !freebsd /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index fa31aaff26..d10fa9012b 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -23,7 +23,6 @@ import ( "strings" "github.com/Microsoft/hcsshim/osversion" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" ) @@ -40,30 +39,32 @@ func DefaultSpec() specs.Platform { } } -type matchComparer struct { - defaults Matcher +type windowsmatcher struct { + specs.Platform osVersionPrefix string + defaultMatcher Matcher } // Match matches platform with the same windows major, minor // and build version. -func (m matchComparer) Match(p specs.Platform) bool { - match := m.defaults.Match(p) +func (m windowsmatcher) Match(p specs.Platform) bool { + match := m.defaultMatcher.Match(p) - if match && p.OS == "windows" { + if match && m.OS == "windows" { // HPC containers do not have OS version filled if p.OSVersion == "" { return true } - hostOsVersion := getOSVersion(m.osVersionPrefix) - ctrOsVersion := getOSVersion(p.OSVersion) + hostOsVersion := GetOsVersion(m.osVersionPrefix) + ctrOsVersion := GetOsVersion(p.OSVersion) return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion) } - return false + + return match } -func getOSVersion(osVersionPrefix string) osversion.OSVersion { +func GetOsVersion(osVersionPrefix string) osversion.OSVersion { parts := strings.Split(osVersionPrefix, ".") if len(parts) < 3 { return osversion.OSVersion{} @@ -83,7 +84,7 @@ func getOSVersion(osVersionPrefix string) osversion.OSVersion { // Less sorts matched platforms in front of other platforms. // For matched platforms, it puts platforms with larger revision // number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { +func (m windowsmatcher) Less(p1, p2 specs.Platform) bool { m1, m2 := m.Match(p1), m.Match(p2) if m1 && m2 { r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) @@ -104,14 +105,15 @@ func revision(v string) int { return r } +func prefix(v string) string { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return v + } + return strings.Join(parts[0:3], ".") +} + // Default returns the current platform's default platform specification. func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } + return Only(DefaultSpec()) } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 2343099418..56613b0765 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -114,14 +114,18 @@ import ( "strconv" "strings" - "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/errdefs" ) var ( specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) ) +// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere. 
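// An editorial illustration (not part of the vendored file) of the alias in
// use together with the helpers in this package:
//
//	p, err := platforms.Parse("linux/arm64") // p is a platforms.Platform
//	if err == nil && platforms.Default().Match(p) {
//		// the host's default matcher accepts linux/arm64 images
//	}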
+type Platform = specs.Platform + // Matcher matches platforms specifications, provided by an image or runtime. type Matcher interface { Match(platform specs.Platform) bool @@ -136,9 +140,7 @@ type Matcher interface { // // Applications should opt to use `Match` over directly parsing specifiers. func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } + return newDefaultMatcher(platform) } type matcher struct { @@ -194,6 +196,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = cpuVariant() } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil } @@ -216,6 +222,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = "" } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil case 3: // we have a fully specified variant, this is rare @@ -225,6 +235,10 @@ func Parse(specifier string) (specs.Platform, error) { p.Variant = "v8" } + if p.OS == "windows" { + p.OSVersion = GetWindowsOsVersion() + } + return p, nil } @@ -257,5 +271,6 @@ func Format(platform specs.Platform) string { func Normalize(platform specs.Platform) specs.Platform { platform.OS = normalizeOS(platform.OS) platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + return platform } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/containerd/platforms/platforms_other.go new file mode 100644 index 0000000000..59beeb3d1d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms_other.go @@ -0,0 +1,34 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NewMatcher returns the default Matcher for containerd +func newDefaultMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} + +func GetWindowsOsVersion() string { + return "" +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go new file mode 100644 index 0000000000..733d18ddea --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "fmt" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +// NewMatcher returns a Windows matcher that will match on osVersionPrefix if +// the platform is Windows otherwise use the default matcher +func newDefaultMatcher(platform specs.Platform) Matcher { + prefix := prefix(platform.OSVersion) + return windowsmatcher{ + Platform: platform, + osVersionPrefix: prefix, + defaultMatcher: &matcher{ + Platform: Normalize(platform), + }, + } +} + +func GetWindowsOsVersion() string { + major, minor, build := windows.RtlGetNtVersionNumbers() + return fmt.Sprintf("%d.%d.%d", major, minor, build) +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index eb38c12715..082f3178b0 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -76,8 +76,20 @@ const ( GCPlugin Type = "io.containerd.gc.v1" // EventPlugin implements event handling EventPlugin Type = "io.containerd.event.v1" + // LeasePlugin implements lease manager + LeasePlugin Type = "io.containerd.lease.v1" + // Streaming implements a stream manager + StreamingPlugin Type = "io.containerd.streaming.v1" // TracingProcessorPlugin implements a open telemetry span processor TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" + // NRIApiPlugin implements the NRI adaptation interface for containerd. + NRIApiPlugin Type = "io.containerd.nri.v1" + // TransferPlugin implements a transfer service + TransferPlugin Type = "io.containerd.transfer.v1" + // SandboxStorePlugin implements a sandbox store + SandboxStorePlugin Type = "io.containerd.sandbox.store.v1" + // SandboxControllerPlugin implements a sandbox controller + SandboxControllerPlugin Type = "io.containerd.sandbox.controller.v1" ) const ( diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go index 0df0669d29..0ee442d955 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -1,5 +1,4 @@ //go:build go1.8 && !windows && amd64 && !static_build && !gccgo -// +build go1.8,!windows,amd64,!static_build,!gccgo /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go index a2883bbbad..c0b53a6b4d 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,5 +1,4 @@ //go:build !go1.8 || windows || !amd64 || static_build || gccgo -// +build !go1.8 windows !amd64 static_build gccgo /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 297c77ab6e..73d8f86625 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/protobuf" ) // Process represents a system process @@ -166,7 +167,7 @@ func (p *process) Wait(ctx context.Context) (<-chan ExitStatus, error) { } c <- ExitStatus{ code: r.ExitStatus, - exitedAt: r.ExitedAt, + exitedAt: protobuf.FromTimestamp(r.ExitedAt), } }() return c, nil @@ -226,7 +227,7 @@ func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitS p.io.Wait() p.io.Close() } - return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil + return &ExitStatus{code: r.ExitStatus, exitedAt: protobuf.FromTimestamp(r.ExitedAt)}, nil } func (p *process) Status(ctx context.Context) (Status, error) { diff --git a/vendor/github.com/containerd/containerd/protobuf/any.go b/vendor/github.com/containerd/containerd/protobuf/any.go new file mode 100644 index 0000000000..84da2a33da --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/any.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package protobuf + +import ( + "github.com/containerd/typeurl/v2" + "google.golang.org/protobuf/types/known/anypb" +) + +// FromAny converts typeurl.Any to github.com/containerd/containerd/protobuf/types.Any. +func FromAny(from typeurl.Any) *anypb.Any { + if from == nil { + return nil + } + + if pbany, ok := from.(*anypb.Any); ok { + return pbany + } + + return &anypb.Any{ + TypeUrl: from.GetTypeUrl(), + Value: from.GetValue(), + } +} + +// FromAny converts an arbitrary interface to github.com/containerd/containerd/protobuf/types.Any. +func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { + any, err := typeurl.MarshalAny(from) + if err != nil { + return nil, err + } + return FromAny(any), nil +} diff --git a/vendor/github.com/containerd/containerd/protobuf/compare.go b/vendor/github.com/containerd/containerd/protobuf/compare.go new file mode 100644 index 0000000000..602a4bcac5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/compare.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package protobuf + +import ( + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/proto" +) + +var Compare = cmp.FilterValues( + func(x, y interface{}) bool { + _, xok := x.(proto.Message) + _, yok := y.(proto.Message) + return xok && yok + }, + cmp.Comparer(func(x, y interface{}) bool { + vx, ok := x.(proto.Message) + if !ok { + return false + } + vy, ok := y.(proto.Message) + if !ok { + return false + } + return proto.Equal(vx, vy) + }), +) diff --git a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go similarity index 97% rename from vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/protobuf/plugin/doc.go index c073fa4ad7..401a6d5ccb 100644 --- a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go @@ -13,3 +13,5 @@ See the License for the specific language governing permissions and limitations under the License. */ + +package plugin diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go new file mode 100644 index 0000000000..1bab0c7766 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto + +package plugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63300, + Name: "containerd.plugin.fieldpath_all", + Tag: "varint,63300,opt,name=fieldpath_all", + Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64400, + Name: "containerd.plugin.fieldpath", + Tag: "varint,64400,opt,name=fieldpath", + Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional bool fieldpath_all = 63300; + E_FieldpathAll = &file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional bool fieldpath = 64400; + E_Fieldpath = &file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes[1] +) + +var File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x43, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x6c, 0x6c, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc4, 0xee, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x41, 0x6c, 0x6c, 0x3a, 0x3f, 0x0a, 0x09, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x90, 0xf7, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x42, 0x32, 0x5a, + 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, +} + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs = []int32{ + 0, 
// 0: containerd.plugin.fieldpath_all:extendee -> google.protobuf.FileOptions + 1, // 1: containerd.plugin.fieldpath:extendee -> google.protobuf.MessageOptions + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 0, // [0:2] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_init() } +func file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_init() { + if File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 2, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs, + ExtensionInfos: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes, + }.Build() + File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto = out.File + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc = nil + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes = nil + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto new file mode 100644 index 0000000000..de98dd899f --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package containerd.plugin; + +import "google/protobuf/descriptor.proto"; + +option go_package = "github.com/containerd/containerd/protobuf/plugin"; + +extend google.protobuf.FileOptions { + optional bool fieldpath_all = 63300; +} + +extend google.protobuf.MessageOptions { + optional bool fieldpath = 64400; +} diff --git a/vendor/github.com/containerd/containerd/protobuf/proto/proto.go b/vendor/github.com/containerd/containerd/protobuf/proto/proto.go new file mode 100644 index 0000000000..6c5e7b75e3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/proto/proto.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package proto provides convinient aliases that make google.golang.org/protobuf migration easier. +package proto + +import ( + google "google.golang.org/protobuf/proto" +) + +func Marshal(input google.Message) ([]byte, error) { + return google.Marshal(input) +} + +func Unmarshal(input []byte, output google.Message) error { + return google.Unmarshal(input, output) +} diff --git a/vendor/github.com/containerd/containerd/protobuf/timestamp.go b/vendor/github.com/containerd/containerd/protobuf/timestamp.go new file mode 100644 index 0000000000..0615f823b0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/timestamp.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package protobuf + +import ( + "time" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Once we migrate off from gogo/protobuf, we can use the function below, which don't return any errors. +// https://github.com/protocolbuffers/protobuf-go/blob/v1.28.0/types/known/timestamppb/timestamp.pb.go#L200-L208 + +// ToTimestamp creates protobuf's Timestamp from time.Time. +func ToTimestamp(from time.Time) *timestamppb.Timestamp { + return timestamppb.New(from) +} + +// FromTimestamp creates time.Time from protobuf's Timestamp. +func FromTimestamp(from *timestamppb.Timestamp) time.Time { + return from.AsTime() +} diff --git a/vendor/github.com/containerd/containerd/protobuf/types/types.go b/vendor/github.com/containerd/containerd/protobuf/types/types.go new file mode 100644 index 0000000000..1a920a2f98 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/types/types.go @@ -0,0 +1,28 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package types provides convinient aliases that make google.golang.org/protobuf migration easier. +package types + +import ( + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/emptypb" +) + +type Empty = emptypb.Empty +type Any = anypb.Any +type FieldMask = field_mask.FieldMask diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 92f7719b1f..5d96c8cc72 100644 --- a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -23,19 +23,28 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/pkg/unpack" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/containerd/containerd/tracing" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) +const ( + pullSpanPrefix = "pull" +) + // Pull downloads the provided content into containerd's content store // and returns a platform specific image object func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "Pull")) + defer span.End() + pullCtx := defaultRemoteContext() + for _, o := range opts { if err := o(c, pullCtx); err != nil { return nil, err @@ -57,25 +66,60 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } } + span.SetAttributes( + tracing.Attribute("image.ref", ref), + tracing.Attribute("unpack", pullCtx.Unpack), + tracing.Attribute("max.concurrent.downloads", pullCtx.MaxConcurrentDownloads), + tracing.Attribute("platforms.count", len(pullCtx.Platforms)), + ) + ctx, done, err := c.WithLease(ctx) if err != nil { return nil, err } defer done(ctx) - var unpacks int32 - var unpackEg *errgroup.Group - var unpackWrapper func(f images.Handler) images.Handler + var unpacker *unpack.Unpacker if pullCtx.Unpack { - // unpacker only supports schema 2 image, for schema 1 this is noop. 
- u, err := c.newUnpacker(ctx, pullCtx) + snapshotterName, err := c.resolveSnapshotterName(ctx, pullCtx.Snapshotter) if err != nil { - return nil, fmt.Errorf("create unpacker: %w", err) + return nil, fmt.Errorf("unable to resolve snapshotter: %w", err) + } + span.SetAttributes(tracing.Attribute("snapshotter.name", snapshotterName)) + var uconfig UnpackConfig + for _, opt := range pullCtx.UnpackOpts { + if err := opt(ctx, &uconfig); err != nil { + return nil, err + } + } + var platformMatcher platforms.Matcher + if !uconfig.CheckPlatformSupported { + platformMatcher = platforms.All + } + + // Check client Unpack config + platform := unpack.Platform{ + Platform: platformMatcher, + SnapshotterKey: snapshotterName, + Snapshotter: c.SnapshotService(snapshotterName), + SnapshotOpts: append(pullCtx.SnapshotterOpts, uconfig.SnapshotOpts...), + Applier: c.DiffService(), + ApplyOpts: uconfig.ApplyOpts, + } + uopts := []unpack.UnpackerOpt{unpack.WithUnpackPlatform(platform)} + if pullCtx.MaxConcurrentDownloads > 0 { + uopts = append(uopts, unpack.WithLimiter(semaphore.NewWeighted(int64(pullCtx.MaxConcurrentDownloads)))) + } + if uconfig.DuplicationSuppressor != nil { + uopts = append(uopts, unpack.WithDuplicationSuppressor(uconfig.DuplicationSuppressor)) + } + unpacker, err = unpack.NewUnpacker(ctx, c.ContentStore(), uopts...) + if err != nil { + return nil, fmt.Errorf("unable to initialize unpacker: %w", err) } - unpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks) defer func() { - if err := unpackEg.Wait(); err != nil { + if _, err := unpacker.Wait(); err != nil { if retErr == nil { retErr = fmt.Errorf("unpack: %w", err) } @@ -84,9 +128,9 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima wrapper := pullCtx.HandlerWrapper pullCtx.HandlerWrapper = func(h images.Handler) images.Handler { if wrapper == nil { - return unpackWrapper(h) + return unpacker.Unpack(h) } - return unpackWrapper(wrapper(h)) + return unpacker.Unpack(wrapper(h)) } } @@ -98,12 +142,15 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima // NOTE(fuweid): unpacker defers blobs download. before create image // record in ImageService, should wait for unpacking(including blobs // download). - if pullCtx.Unpack { - if unpackEg != nil { - if err := unpackEg.Wait(); err != nil { - return nil, err - } + var ur unpack.Result + if unpacker != nil { + _, unpackSpan := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "UnpackWait")) + if ur, err = unpacker.Wait(); err != nil { + unpackSpan.SetStatus(err) + unpackSpan.End() + return nil, err } + unpackSpan.End() } img, err = c.createNewImage(ctx, img) @@ -112,14 +159,13 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) + span.SetAttributes(tracing.Attribute("image.ref", i.Name())) - if pullCtx.Unpack { - if unpacks == 0 { - // Try to unpack is none is done previously. - // This is at least required for schema 1 image. - if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { - return nil, fmt.Errorf("failed to unpack image on snapshotter %s: %w", pullCtx.Snapshotter, err) - } + if unpacker != nil && ur.Unpacks == 0 { + // Unpack was tried previously but nothing was unpacked + // This is at least required for schema 1 image. 
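// (Editorial note: ur.Unpacks == 0 means the handler-driven unpacker never
// saw a config/layer pair to extract, as happens for schema 1 pulls whose
// converted content bypasses the wrapped handlers, so the client falls back
// to unpacking the assembled image directly.)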
+ if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { + return nil, fmt.Errorf("failed to unpack image on snapshotter %s: %w", pullCtx.Snapshotter, err) } } @@ -127,6 +173,8 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, limit int) (images.Image, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "fetch")) + defer span.End() store := c.ContentStore() name, desc, err := rCtx.Resolver.Resolve(ctx, ref) if err != nil { @@ -230,6 +278,8 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim } func (c *Client) createNewImage(ctx context.Context, img images.Image) (images.Image, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "pull.createNewImage")) + defer span.End() is := c.ImageService() for { if created, err := is.Create(ctx, img); err != nil { diff --git a/vendor/github.com/containerd/containerd/reference/docker/helpers.go b/vendor/github.com/containerd/containerd/reference/docker/helpers.go new file mode 100644 index 0000000000..386025104a --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/helpers.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/normalize.go b/vendor/github.com/containerd/containerd/reference/docker/normalize.go new file mode 100644 index 0000000000..b299bf6c06 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/normalize.go @@ -0,0 +1,196 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. 
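// For illustration (editorial addition, not part of the vendored file), the
// split behaves as follows:
//
//	"ubuntu"                   -> ("docker.io", "library/ubuntu")
//	"dmcgowan/myapp"           -> ("docker.io", "dmcgowan/myapp")
//	"index.docker.io/foo/bar"  -> ("docker.io", "foo/bar")
//	"registry.example.com/app" -> ("registry.example.com", "app")
//	"localhost:5000/app"       -> ("localhost:5000", "app")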
+func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 1ef223d6ba..4dc00474ee 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -21,7 +21,9 @@ // // reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] +// domain := host [':' port-number] +// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A +// domain-name := domain-component ['.' 
domain-component]* // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* @@ -43,8 +45,6 @@ package docker import ( "errors" "fmt" - "path" - "regexp" "strings" "github.com/opencontainers/go-digest" @@ -451,349 +451,3 @@ func (c canonicalReference) String() string { func (c canonicalReference) Digest() digest.Digest { return c.digest } - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. 
- IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. 
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containerd/containerd/reference/docker/regexp.go b/vendor/github.com/containerd/containerd/reference/docker/regexp.go new file mode 100644 index 0000000000..4be3c575e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/regexp.go @@ -0,0 +1,191 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "regexp" + +var ( + // alphaNumeric defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumeric = `[a-z0-9]+` + + // separator defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]*)` + + // nameComponent restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponent = expression( + alphaNumeric, + optional(repeated(separator, alphaNumeric))) + + // domainNameComponent restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp. + domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + + // ipv6address are enclosed between square brackets and may be represented + // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format + // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as + // IPv4-Mapped are deliberately excluded. + ipv6address = expression( + literal(`[`), `(?:[a-fA-F0-9:]+)`, literal(`]`), + ) + + // domainName defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. This includes IPv4 addresses on decimal format. + domainName = expression( + domainNameComponent, + optional(repeated(literal(`.`), domainNameComponent)), + ) + + // host defines the structure of potential domains based on the URI + // Host subcomponent on rfc3986. It may be a subset of DNS domain name, + // or an IPv4 address in decimal format, or an IPv6 address between square + // brackets (excluding zone identifiers as defined by rfc6874 or special + // addresses such as IPv4-Mapped). + host = `(?:` + domainName + `|` + ipv6address + `)` + + // allowed by the URI Host subcomponent on rfc3986 to ensure backwards + // compatibility with Docker image names. + domain = expression( + host, + optional(literal(`:`), `[0-9]+`)) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = regexp.MustCompile(domain) + + tag = `[\w][\w.-]{0,127}` + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = regexp.MustCompile(tag) + + anchoredTag = anchored(tag) + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. 
+ anchoredTagRegexp = regexp.MustCompile(anchoredTag) + + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + // DigestRegexp matches valid digests. + DigestRegexp = regexp.MustCompile(digestPat) + + anchoredDigest = anchored(digestPat) + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(anchoredDigest) + + namePat = expression( + optional(domain, literal(`/`)), + nameComponent, + optional(repeated(literal(`/`), nameComponent))) + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = regexp.MustCompile(namePat) + + anchoredName = anchored( + optional(capture(domain), literal(`/`)), + capture(nameComponent, + optional(repeated(literal(`/`), nameComponent)))) + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = regexp.MustCompile(anchoredName) + + referencePat = anchored(capture(namePat), + optional(literal(":"), capture(tag)), + optional(literal("@"), capture(digestPat))) + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = regexp.MustCompile(referencePat) + + identifier = `([a-f0-9]{64})` + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = regexp.MustCompile(identifier) + + shortIdentifier = `([a-f0-9]{6,64})` + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) + + anchoredIdentifier = anchored(identifier) + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = regexp.MustCompile(anchoredIdentifier) +) + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) string { + re := regexp.MustCompile(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re.String() +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...string) string { + var s string + for _, re := range res { + s += re + } + + return s +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return group(expression(res...)) + `?` +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...string) string { + return group(expression(res...)) + `+` +} + +// group wraps the regexp in a non-capturing group. +func group(res ...string) string { + return `(?:` + expression(res...) + `)` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + expression(res...) + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. 
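The composed patterns above are exported as compiled regexps; a short sketch of consulting them directly, assuming the grammar exactly as vendored here (including the new bracketed-IPv6 host form). The reference strings are placeholders.

    package main

    import (
        "fmt"

        refdocker "github.com/containerd/containerd/reference/docker"
    )

    func main() {
        // ReferenceRegexp is anchored; its capture groups are name, tag, digest.
        if m := refdocker.ReferenceRegexp.FindStringSubmatch("docker.io/library/redis:7.2"); m != nil {
            fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
        }

        // With the host grammar above, a bracketed IPv6 registry host is accepted.
        fmt.Println(refdocker.ReferenceRegexp.MatchString("[::1]:5000/app/web:dev")) // expected: true
    }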
+func anchored(res ...string) string { + return `^` + expression(res...) + `$` +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/sort.go b/vendor/github.com/containerd/containerd/reference/docker/sort.go new file mode 100644 index 0000000000..984e37528d --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/sort.go @@ -0,0 +1,73 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "sort" +) + +// Sort sorts string references preferring higher information references +// The precedence is as follows: +// 1. Name + Tag + Digest +// 2. Name + Tag +// 3. Name + Digest +// 4. Name +// 5. Digest +// 6. Parse error +func Sort(references []string) []string { + var prefs []Reference + var bad []string + + for _, ref := range references { + pref, err := ParseAnyReference(ref) + if err != nil { + bad = append(bad, ref) + } else { + prefs = append(prefs, pref) + } + } + sort.Slice(prefs, func(a, b int) bool { + ar := refRank(prefs[a]) + br := refRank(prefs[b]) + if ar == br { + return prefs[a].String() < prefs[b].String() + } + return ar < br + }) + sort.Strings(bad) + var refs []string + for _, pref := range prefs { + refs = append(refs, pref.String()) + } + return append(refs, bad...) +} + +func refRank(ref Reference) uint8 { + if _, ok := ref.(Named); ok { + if _, ok = ref.(Tagged); ok { + if _, ok = ref.(Digested); ok { + return 1 + } + return 2 + } + if _, ok = ref.(Digested); ok { + return 3 + } + return 4 + } + return 5 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go index c259873d2a..64c6a38f91 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -29,7 +29,6 @@ import ( "github.com/containerd/containerd/log" remoteserrors "github.com/containerd/containerd/remotes/errors" "github.com/containerd/containerd/version" - "golang.org/x/net/context/ctxhttp" ) var ( @@ -115,7 +114,7 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. form.Set("access_type", "offline") } - req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode())) if err != nil { return nil, err } @@ -127,7 +126,7 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. 
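Returning to the Sort helper introduced in sort.go above: it orders string references from most to least specific, with unparseable entries last. A rough sketch of the expected result under that ranking; the input strings are placeholders.

    package main

    import (
        "fmt"

        refdocker "github.com/containerd/containerd/reference/docker"
    )

    func main() {
        sorted := refdocker.Sort([]string{
            "alpine",            // name only
            "alpine:3.18",       // name + tag ranks higher
            "not a valid ref!!", // parse errors sort last
        })
        for _, r := range sorted {
            fmt.Println(r)
        }
        // Roughly: docker.io/library/alpine:3.18, docker.io/library/alpine,
        // then the unparseable entry.
    }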
req.Header.Set("User-Agent", "containerd/"+version.Version) } - resp, err := ctxhttp.Do(ctx, client, req) + resp, err := client.Do(req) if err != nil { return nil, err } @@ -162,7 +161,7 @@ type FetchTokenResponse struct { // FetchToken fetches a token using a GET request func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { - req, err := http.NewRequest("GET", to.Realm, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil) if err != nil { return nil, err } @@ -194,7 +193,7 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t req.URL.RawQuery = reqParams.Encode() - resp, err := ctxhttp.Do(ctx, client, req) + resp, err := client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index eaa0e5dbdb..8fc823144e 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -29,7 +29,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes/docker/auth" remoteerrors "github.com/containerd/containerd/remotes/errors" - "github.com/sirupsen/logrus" ) type dockerAuthorizer struct { @@ -45,13 +44,6 @@ type dockerAuthorizer struct { onFetchRefreshToken OnFetchRefreshToken } -// NewAuthorizer creates a Docker authorizer using the provided function to -// get credentials for the token server or basic auth. -// Deprecated: Use NewDockerAuthorizer -func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) -} - type authorizerConfig struct { credentials func(string) (string, string, error) client *http.Client @@ -319,7 +311,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st } return resp.Token, resp.RefreshToken, nil } - log.G(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithFields(log.Fields{ "status": errStatus.Status, "body": string(errStatus.Body), }).Debugf("token request failed") diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go new file mode 100644 index 0000000000..9082053924 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go @@ -0,0 +1,55 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
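Since the deprecated NewAuthorizer constructor is removed above, the options-based constructor is the remaining path; a hedged sketch of an equivalent setup (the credential callback values are placeholders).

    package main

    import (
        "net/http"

        "github.com/containerd/containerd/remotes/docker"
    )

    func main() {
        // Formerly docker.NewAuthorizer(client, creds); now expressed with options.
        creds := func(host string) (string, string, error) {
            // Return username and secret (password or identity token) for host.
            return "", "", nil
        }
        authorizer := docker.NewDockerAuthorizer(
            docker.WithAuthClient(http.DefaultClient),
            docker.WithAuthCreds(creds),
        )
        _ = docker.NewResolver(docker.ResolverOptions{
            Hosts: docker.ConfigureDefaultRegistries(docker.WithAuthorizer(authorizer)),
        })
    }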
+*/ + +package docker + +import ( + "context" + "os" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func FuzzConvertManifest(data []byte) int { + ctx := context.Background() + + // Do not log the message below + // level=warning msg="do nothing for media type: ..." + log.G(ctx).Logger.SetLevel(logrus.PanicLevel) + + f := fuzz.NewConsumer(data) + desc := ocispec.Descriptor{} + err := f.GenerateStruct(&desc) + if err != nil { + return 0 + } + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + _, _ = ConvertManifest(ctx, cs, desc) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 11a75356e8..ecf245933f 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -29,6 +29,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -52,18 +53,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { // firstly try fetch via external urls for _, us := range desc.URLs { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) - u, err := url.Parse(us) if err != nil { - log.G(ctx).WithError(err).Debug("failed to parse") + log.G(ctx).WithError(err).Debugf("failed to parse %q", us) continue } if u.Scheme != "http" && u.Scheme != "https" { log.G(ctx).Debug("non-http(s) alternative url is unsupported") continue } - log.G(ctx).Debug("trying alternative url") + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).Info("request") // Try this first, parse it host := RegistryHost{ @@ -151,8 +151,106 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } +func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, ps ...string) (*request, int64, error) { + headReq := r.request(host, http.MethodHead, ps...) + if err := headReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + + headResp, err := headReq.doWithRetries(ctx, nil) + if err != nil { + return nil, 0, err + } + if headResp.Body != nil { + headResp.Body.Close() + } + if headResp.StatusCode > 299 { + return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status) + } + + getReq := r.request(host, http.MethodGet, ps...) 
+ if err := getReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + return getReq, headResp.ContentLength, nil +} + +func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) { + var desc ocispec.Descriptor + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, desc, err + } + + var ( + getReq *request + sz int64 + firstErr error + ) + + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "blobs", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + + if getReq == nil { + // Fall back to the "manifests" endpoint + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "manifests", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + } + + if getReq == nil { + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound) + } + if firstErr == nil { + firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst) + } + return nil, desc, firstErr + } + + seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) { + return r.open(ctx, getReq, "", offset) + }) + if err != nil { + return nil, desc, err + } + + desc = ocispec.Descriptor{ + MediaType: "application/octet-stream", + Digest: dgst, + Size: sz, + } + return seeker, desc, nil +} + func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { - req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + if mediatype == "" { + req.header.Set("Accept", "*/*") + } else { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go new file mode 100644 index 0000000000..b98886c595 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go @@ -0,0 +1,81 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + + refDocker "github.com/containerd/containerd/reference/docker" +) + +func FuzzFetcher(data []byte) int { + dataLen := len(data) + if dataLen == 0 { + return -1 + } + + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) + rw.Header().Set("content-length", fmt.Sprintf("%d", dataLen)) + rw.Write(data) + })) + defer s.Close() + + u, err := url.Parse(s.URL) + if err != nil { + return 0 + } + + f := dockerFetcher{&dockerBase{ + repository: "nonempty", + }} + host := RegistryHost{ + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + } + + ctx := context.Background() + req := f.request(host, http.MethodGet) + rc, err := f.open(ctx, req, "", 0) + if err != nil { + return 0 + } + b, err := io.ReadAll(rc) + if err != nil { + return 0 + } + + expected := data + if len(b) != len(expected) { + panic("len of request is not equal to len of expected but should be") + } + return 1 +} + +func FuzzParseDockerRef(data []byte) int { + _, _ = refDocker.ParseDockerRef(string(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go index 529cfbc274..27638ccc02 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -30,11 +30,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) -var ( - // labelDistributionSource describes the source blob comes from. - labelDistributionSource = "containerd.io/distribution.source" -) - // AppendDistributionSourceLabel updates the label of blob with distribution source. 
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { refspec, err := reference.Parse(ref) @@ -108,7 +103,7 @@ func appendDistributionSourceLabel(originLabel, repo string) string { } func distributionSourceLabelKey(source string) string { - return fmt.Sprintf("%s.%s", labelDistributionSource, source) + return fmt.Sprintf("%s.%s", labels.LabelDistributionSource, source) } // selectRepositoryMountCandidate will select the repo which has longest diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index bef77fa61d..ef6e8056a0 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -191,6 +191,9 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str if resp == nil { resp, err = req.doWithRetries(ctx, nil) if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err) + } return nil, err } } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 13f500e4d2..3d6c0182f1 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -32,12 +32,12 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + remoteerrors "github.com/containerd/containerd/remotes/errors" + "github.com/containerd/containerd/tracing" "github.com/containerd/containerd/version" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/net/context/ctxhttp" ) var ( @@ -70,6 +70,9 @@ type Authorizer interface { // unmodified. It may also add an `Authorization` header as // "bearer " // "basic " + // + // It may return remotes/errors.ErrUnexpectedStatus, which for example, + // can be used by the caller to find out the status code returned by the registry. Authorize(context.Context, *http.Request) error // AddResponses adds a 401 response for the authorizer to consider when @@ -160,7 +163,8 @@ func NewResolver(options ResolverOptions) remotes.Resolver { images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*/*"}, ", ")) + ocispec.MediaTypeImageIndex, "*/*", + }, ", ")) } else { resolveHeader["Accept"] = options.Headers["Accept"] delete(options.Headers, "Accept") @@ -307,11 +311,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if resp.StatusCode > 399 { // Set firstErr when encountering the first non-404 status code. 
if firstErr == nil { - firstErr = fmt.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) + firstErr = remoteerrors.NewUnexpectedStatusErr(resp) } continue // try another host } - return "", ocispec.Descriptor{}, fmt.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) + return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp) } size := resp.ContentLength contentType := getManifestMediaType(resp) @@ -348,26 +352,31 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if err != nil { return "", ocispec.Descriptor{}, err } - defer resp.Body.Close() bodyReader := countingReader{reader: resp.Body} contentType = getManifestMediaType(resp) - if dgst == "" { + err = func() error { + defer resp.Body.Close() + if dgst != "" { + _, err = io.Copy(io.Discard, &bodyReader) + return err + } + if contentType == images.MediaTypeDockerSchema1Manifest { b, err := schema1.ReadStripSignature(&bodyReader) if err != nil { - return "", ocispec.Descriptor{}, err + return err } dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } + return nil } - } else if _, err := io.Copy(io.Discard, &bodyReader); err != nil { + + dgst, err = digest.FromReader(&bodyReader) + return err + }() + if err != nil { return "", ocispec.Descriptor{}, err } size = bodyReader.bytesRead @@ -533,7 +542,7 @@ type request struct { func (r *request) do(ctx context.Context) (*http.Response, error) { u := r.host.Scheme + "://" + r.host.Host + r.path - req, err := http.NewRequest(r.method, u, nil) + req, err := http.NewRequestWithContext(ctx, r.method, u, nil) if err != nil { return nil, err } @@ -560,7 +569,7 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { return nil, fmt.Errorf("failed to authorize: %w", err) } - var client = &http.Client{} + client := &http.Client{} if r.host.Client != nil { *client = *r.host.Client } @@ -575,11 +584,18 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { return nil } } - - resp, err := ctxhttp.Do(ctx, client, req) + _, httpSpan := tracing.StartSpan( + ctx, + tracing.Name("remotes.docker.resolver", "HTTPRequest"), + tracing.WithHTTPRequest(req), + ) + defer httpSpan.End() + resp, err := client.Do(req) if err != nil { + httpSpan.SetStatus(err) return nil, fmt.Errorf("failed to do request: %w", err) } + httpSpan.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...) log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } @@ -639,7 +655,7 @@ func (r *request) String() string { return r.host.Scheme + "://" + r.host.Host + r.path } -func requestFields(req *http.Request) logrus.Fields { +func requestFields(req *http.Request) log.Fields { fields := map[string]interface{}{ "request.method": req.Method, } @@ -657,10 +673,10 @@ func requestFields(req *http.Request) logrus.Fields { } } - return logrus.Fields(fields) + return fields } -func responseFields(resp *http.Response) logrus.Fields { +func responseFields(resp *http.Response) log.Fields { fields := map[string]interface{}{ "response.status": resp.Status, } @@ -675,7 +691,7 @@ func responseFields(resp *http.Response) logrus.Fields { } } - return logrus.Fields(fields) + return fields } // IsLocalhost checks if the registry host is local. 
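With Resolve now surfacing remoteerrors.ErrUnexpectedStatus for non-404 registry failures (see the hunk above), callers can inspect the status and request URL; a sketch assuming the error is reachable via errors.As, with an illustrative image reference.

    package main

    import (
        "context"
        "errors"
        "fmt"

        "github.com/containerd/containerd/remotes/docker"
        remoteerrors "github.com/containerd/containerd/remotes/errors"
    )

    func main() {
        resolver := docker.NewResolver(docker.ResolverOptions{})
        _, desc, err := resolver.Resolve(context.Background(), "docker.io/library/alpine:latest")
        if err != nil {
            var unexpected remoteerrors.ErrUnexpectedStatus
            if errors.As(err, &unexpected) {
                fmt.Println("registry returned", unexpected.Status, "for", unexpected.RequestURL)
            }
            return
        }
        fmt.Println("resolved to", desc.Digest)
    }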
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index efa4e8d6ee..8c9e520cd2 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -14,6 +14,9 @@ limitations under the License. */ +// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1. +// +// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. package schema1 import ( @@ -33,6 +36,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" digest "github.com/opencontainers/go-digest" @@ -363,12 +367,12 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro cinfo := content.Info{ Digest: desc.Digest, Labels: map[string]string{ - "containerd.io/uncompressed": state.diffID.String(), + labels.LabelUncompressed: state.diffID.String(), labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), }, } - if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { return fmt.Errorf("failed to update uncompressed label: %w", err) } @@ -387,7 +391,7 @@ func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descri } desc.Size = cinfo.Size - diffID, ok := cinfo.Labels["containerd.io/uncompressed"] + diffID, ok := cinfo.Labels[labels.LabelUncompressed] if !ok { return false, nil } @@ -406,7 +410,7 @@ func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descri bState := blobState{empty: isEmpty} if bState.diffID, err = digest.Parse(diffID); err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID) return false, nil } diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go index 67ccb23df6..f60ff0fc28 100644 --- a/vendor/github.com/containerd/containerd/remotes/errors/errors.go +++ b/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -33,7 +33,7 @@ type ErrUnexpectedStatus struct { } func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("unexpected status: %s", e.Status) + return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status) } // NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 4d91ed2e54..0ff39179c2 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -17,6 +17,7 @@ package remotes import ( + "bytes" "context" "errors" "fmt" @@ -27,10 +28,10 @@ import ( 
"github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -90,7 +91,7 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { // recursive fetch. func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ "digest": desc.Digest, "mediatype": desc.MediaType, "size": desc.Size, @@ -100,20 +101,21 @@ func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc case images.MediaTypeDockerSchema1Manifest: return nil, fmt.Errorf("%v not supported", desc.MediaType) default: - err := fetch(ctx, ingester, fetcher, desc) + err := Fetch(ctx, ingester, fetcher, desc) + if errdefs.IsAlreadyExists(err) { + return nil, nil + } return nil, err } } } -func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { +// Fetch fetches the given digest into the provided ingester +func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { log.G(ctx).Debug("fetch") cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) if err != nil { - if errdefs.IsAlreadyExists(err) { - return nil - } return err } defer cw.Close() @@ -135,7 +137,11 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc if err != nil && !errdefs.IsAlreadyExists(err) { return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) } - return nil + return err + } + + if desc.Size == int64(len(desc.Data)) { + return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest) } rc, err := fetcher.Fetch(ctx, desc) @@ -151,7 +157,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc // using a writer from the pusher. func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ "digest": desc.Digest, "mediatype": desc.MediaType, "size": desc.Size, @@ -197,17 +203,26 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. -func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +// +// If the passed in content.Provider is also a content.InfoProvider (such as +// content.Manager) then this will also annotate the distribution sources using +// labels prefixed with "containerd.io/distribution.source". 
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex - manifestStack := []ocispec.Descriptor{} + manifests := []ocispec.Descriptor{} + indexStack := []ocispec.Descriptor{} filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: m.Lock() - manifestStack = append(manifestStack, desc) + manifests = append(manifests, desc) + m.Unlock() + return nil, images.ErrStopHandler + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + indexStack = append(indexStack, desc) m.Unlock() return nil, images.ErrStopHandler default: @@ -219,13 +234,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) - annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) + var handler images.Handler + if m, ok := store.(content.InfoProvider); ok { + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m) + handler = images.Handlers(annotateHandler, filterHandler, pushHandler) + } else { + handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler) + } - var handler images.Handler = images.Handlers( - annotateHandler, - filterHandler, - pushHandler, - ) if wrapper != nil { handler = wrapper(handler) } @@ -234,16 +250,18 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st return err } + if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil { + return err + } + // Iterate in reverse order as seen, parent always uploaded after child - for i := len(manifestStack) - 1; i >= 0; i-- { - _, err := pushHandler(ctx, manifestStack[i]) + for i := len(indexStack) - 1; i >= 0; i-- { + err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i]) if err != nil { // TODO(estesp): until we have a more complete method for index push, we need to report // missing dependencies in an index/manifest list by sensing the "400 Bad Request" // as a marker for this problem - if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || - manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && - errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { + if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err) } return err @@ -327,14 +345,15 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) // annotateDistributionSourceHandler add distribution source label into // annotation of config or blob descriptor. 
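PushContent now accepts a plain content.Provider and only annotates distribution sources when the provider also implements content.InfoProvider; a rough sketch of calling it with a full content.Store (which satisfies both), assuming a resolver-derived pusher. The pushImage helper name is a placeholder.

    package main

    import (
        "context"

        "github.com/containerd/containerd/content"
        "github.com/containerd/containerd/platforms"
        "github.com/containerd/containerd/remotes"
        "github.com/containerd/containerd/remotes/docker"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // pushImage pushes the content DAG rooted at desc from store to ref.
    func pushImage(ctx context.Context, store content.Store, desc ocispec.Descriptor, ref string) error {
        resolver := docker.NewResolver(docker.ResolverOptions{})
        pusher, err := resolver.Pusher(ctx, ref)
        if err != nil {
            return err
        }
        // A content.Store is also a content.InfoProvider, so distribution-source
        // labels are propagated as described in the comment above.
        return remotes.PushContent(ctx, pusher, desc, store, nil, platforms.All, nil)
    }

    func main() {
        // Wiring of ctx, store, desc and ref is environment-specific; omitted here.
    }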
-func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { +func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { children, err := f(ctx, desc) if err != nil { return nil, err } - // only add distribution source for the config or blob data descriptor + // Distribution source is only used for config or blob but may be inherited from + // a manifest or manifest list switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: @@ -342,16 +361,32 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Man return children, nil } + // parentInfo can be used to inherit info for non-existent blobs + var parentInfo *content.Info + for i := range children { child := children[i] - info, err := manager.Info(ctx, child.Digest) + info, err := provider.Info(ctx, child.Digest) if err != nil { - return nil, err + if !errdefs.IsNotFound(err) { + return nil, err + } + if parentInfo == nil { + pi, err := provider.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + parentInfo = &pi + } + // Blob may not exist locally, annotate with parent labels for cross repo + // mount or fetch. Parent sources may apply to all children since most + // registries enforce that children exist before the manifests. + info = *parentInfo } for k, v := range info.Labels { - if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + if !strings.HasPrefix(k, labels.LabelDistributionSource+".") { continue } diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go index 624b14f05d..f200c84bc7 100644 --- a/vendor/github.com/containerd/containerd/remotes/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -21,6 +21,7 @@ import ( "io" "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -33,7 +34,7 @@ type Resolver interface { // reference a specific host or be matched against a specific handler. // // The returned name should be used to identify the referenced entity. - // Dependending on the remote namespace, this may be immutable or mutable. + // Depending on the remote namespace, this may be immutable or mutable. // While the name may differ from ref, it should itself be a valid ref. // // If the resolution fails, an error will be returned. @@ -50,12 +51,23 @@ type Resolver interface { Pusher(ctx context.Context, ref string) (Pusher, error) } -// Fetcher fetches content +// Fetcher fetches content. +// A fetcher implementation may implement the FetcherByDigest interface too. type Fetcher interface { // Fetch the resource identified by the descriptor. Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) } +// FetcherByDigest fetches content by the digest. +type FetcherByDigest interface { + // FetchByDigest fetches the resource identified by the digest. + // + // FetcherByDigest usually returns an incomplete descriptor. + // Typically, the media type is always set to "application/octet-stream", + // and the annotations are unset. 
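The FetcherByDigest interface described above is implemented by the docker fetcher earlier in this patch; a sketch of fetching a blob when only its digest is known, assuming the type assertion succeeds for the configured resolver. The fetchByDigest helper name is a placeholder.

    package main

    import (
        "context"
        "fmt"
        "io"

        "github.com/containerd/containerd/remotes"
        "github.com/containerd/containerd/remotes/docker"
        "github.com/opencontainers/go-digest"
    )

    func fetchByDigest(ctx context.Context, name string, dgst digest.Digest) error {
        resolver := docker.NewResolver(docker.ResolverOptions{})
        fetcher, err := resolver.Fetcher(ctx, name)
        if err != nil {
            return err
        }
        fbd, ok := fetcher.(remotes.FetcherByDigest)
        if !ok {
            return fmt.Errorf("fetcher for %s does not support fetch by digest", name)
        }
        rc, desc, err := fbd.FetchByDigest(ctx, dgst)
        if err != nil {
            return err
        }
        defer rc.Close()
        // The returned descriptor is intentionally incomplete: the media type is
        // "application/octet-stream" and annotations are left unset.
        n, err := io.Copy(io.Discard, rc)
        fmt.Println("fetched", n, "bytes for", desc.Digest)
        return err
    }

    func main() {
        // Wiring of ctx, reference name and digest is environment-specific; omitted.
    }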
+ FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) +} + // Pusher pushes content type Pusher interface { // Push returns a content writer for the given resource identified diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index 226cebccf2..da9dbe2752 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -22,7 +22,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/cleanup" "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -32,13 +32,6 @@ import ( // the content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { - // dctx is used to handle cleanup things just in case the param ctx - // has been canceled, which causes that the defer cleanup fails. - dctx := context.Background() - if ns, ok := namespaces.Namespace(ctx); ok { - dctx = namespaces.WithNamespace(dctx, ns) - } - info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -49,7 +42,9 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(dctx, lowerKey) + defer cleanup.Do(ctx, func(ctx context.Context) { + sn.Remove(ctx, lowerKey) + }) var upper []mount.Mount if info.Kind == snapshots.KindActive { @@ -63,7 +58,9 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(dctx, upperKey) + defer cleanup.Do(ctx, func(ctx context.Context) { + sn.Remove(ctx, upperKey) + }) } return d.Compare(ctx, lower, upper, opts...) diff --git a/vendor/github.com/containerd/containerd/rootfs/init_other.go b/vendor/github.com/containerd/containerd/rootfs/init_other.go index d8e38d4c78..049cff2880 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init_other.go +++ b/vendor/github.com/containerd/containerd/rootfs/init_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt index cdf0e9ddce..eb94415b42 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt @@ -1,7 +1,6 @@ file { name: "github.com/containerd/containerd/runtime/linux/runctypes/runc.proto" package: "containerd.linux.runc" - dependency: "gogoproto/gogo.proto" message_type { name: "RuncOptions" field { @@ -23,6 +22,9 @@ file { number: 3 label: LABEL_OPTIONAL type: TYPE_STRING + options { + deprecated: true + } json_name: "criuPath" } field { @@ -206,6 +208,5 @@ file { options { go_package: "github.com/containerd/containerd/runtime/linux/runctypes;runctypes" } - weak_dependency: 0 syntax: "proto3" } diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go index 46d31ff59a..37f3329bce 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go @@ -1,1811 +1,581 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/runtime/linux/runctypes/runc.proto package runctypes import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type RuncOptions struct { - Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"` - RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"` - CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` - SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"` + RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"` + // criu binary path. 
+ // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + // + // Deprecated: Do not use. + CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` + SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` } -func (m *RuncOptions) Reset() { *m = RuncOptions{} } -func (*RuncOptions) ProtoMessage() {} -func (*RuncOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{0} -} -func (m *RuncOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuncOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RuncOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RuncOptions) Reset() { + *x = RuncOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *RuncOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuncOptions.Merge(m, src) -} -func (m *RuncOptions) XXX_Size() int { - return m.Size() -} -func (m *RuncOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuncOptions.DiscardUnknown(m) + +func (x *RuncOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_RuncOptions proto.InternalMessageInfo +func (*RuncOptions) ProtoMessage() {} + +func (x *RuncOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuncOptions.ProtoReflect.Descriptor instead. +func (*RuncOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{0} +} + +func (x *RuncOptions) GetRuntime() string { + if x != nil { + return x.Runtime + } + return "" +} + +func (x *RuncOptions) GetRuntimeRoot() string { + if x != nil { + return x.RuntimeRoot + } + return "" +} + +// Deprecated: Do not use. 
+func (x *RuncOptions) GetCriuPath() string { + if x != nil { + return x.CriuPath + } + return "" +} + +func (x *RuncOptions) GetSystemdCgroup() bool { + if x != nil { + return x.SystemdCgroup + } + return false +} type CreateOptions struct { - NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` - OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` - ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` - EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` - CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` - NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"` - ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"` - IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"` - IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"` - CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` - CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` + OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` + ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` + EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` + CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` + NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"` + ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"` + IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"` + IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"` + CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` + CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` } -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func 
(*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{1} -} -func (m *CreateOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateOptions) Reset() { + *x = CreateOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateOptions.Merge(m, src) -} -func (m *CreateOptions) XXX_Size() int { - return m.Size() -} -func (m *CreateOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CreateOptions.DiscardUnknown(m) + +func (x *CreateOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateOptions proto.InternalMessageInfo +func (*CreateOptions) ProtoMessage() {} + +func (x *CreateOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateOptions.ProtoReflect.Descriptor instead. +func (*CreateOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateOptions) GetNoPivotRoot() bool { + if x != nil { + return x.NoPivotRoot + } + return false +} + +func (x *CreateOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp + } + return false +} + +func (x *CreateOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets + } + return false +} + +func (x *CreateOptions) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CreateOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks + } + return false +} + +func (x *CreateOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces + } + return nil +} + +func (x *CreateOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode + } + return "" +} + +func (x *CreateOptions) GetNoNewKeyring() bool { + if x != nil { + return x.NoNewKeyring + } + return false +} + +func (x *CreateOptions) GetShimCgroup() string { + if x != nil { + return x.ShimCgroup + } + return "" +} + +func (x *CreateOptions) GetIoUid() uint32 { + if x != nil { + return x.IoUid + } + return 0 +} + +func (x *CreateOptions) GetIoGid() uint32 { + if x != nil { + return x.IoGid + } + return 0 +} + +func (x *CreateOptions) GetCriuWorkPath() string { + if x != nil { + return x.CriuWorkPath + } + return "" +} + +func (x *CreateOptions) GetCriuImagePath() string { + if x != nil { + return x.CriuImagePath + } + return "" +} type CheckpointOptions struct { - Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` - OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` - ExternalUnixSockets bool 
`protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` - EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` - CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` - WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` - ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` + OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` + ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` + EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` + CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` + WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` + ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` } -func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} } -func (*CheckpointOptions) ProtoMessage() {} -func (*CheckpointOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{2} -} -func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CheckpointOptions) Reset() { + *x = CheckpointOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CheckpointOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointOptions.Merge(m, src) -} -func (m *CheckpointOptions) XXX_Size() int { - return m.Size() -} -func (m *CheckpointOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointOptions.DiscardUnknown(m) + +func (x *CheckpointOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo +func (*CheckpointOptions) ProtoMessage() {} + +func (x *CheckpointOptions) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointOptions.ProtoReflect.Descriptor instead. +func (*CheckpointOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{2} +} + +func (x *CheckpointOptions) GetExit() bool { + if x != nil { + return x.Exit + } + return false +} + +func (x *CheckpointOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp + } + return false +} + +func (x *CheckpointOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets + } + return false +} + +func (x *CheckpointOptions) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CheckpointOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks + } + return false +} + +func (x *CheckpointOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces + } + return nil +} + +func (x *CheckpointOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode + } + return "" +} + +func (x *CheckpointOptions) GetWorkPath() string { + if x != nil { + return x.WorkPath + } + return "" +} + +func (x *CheckpointOptions) GetImagePath() string { + if x != nil { + return x.ImagePath + } + return "" +} type ProcessDetails struct { - ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *ProcessDetails) Reset() { + *x = ProcessDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessDetails) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } func (*ProcessDetails) ProtoMessage() {} + +func (x *ProcessDetails) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessDetails.ProtoReflect.Descriptor instead. 
func (*ProcessDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{3} -} -func (m *ProcessDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProcessDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessDetails.Merge(m, src) -} -func (m *ProcessDetails) XXX_Size() int { - return m.Size() -} -func (m *ProcessDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessDetails.DiscardUnknown(m) + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{3} } -var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo - -func init() { - proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions") - proto.RegisterType((*CreateOptions)(nil), "containerd.linux.runc.CreateOptions") - proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions") - proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails") +func (x *ProcessDetails) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptor_d20e2ba8b3cc58b9) -} +var File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto protoreflect.FileDescriptor -var fileDescriptor_d20e2ba8b3cc58b9 = []byte{ - // 604 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f, - 0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48, - 0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1, - 0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd, - 0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25, - 0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1, - 0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57, - 0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4, - 0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58, - 0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09, - 0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99, - 0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60, - 0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d, - 0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a, - 0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53, - 0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b, - 0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8, - 0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 
0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4, - 0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a, - 0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15, - 0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89, - 0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23, - 0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61, - 0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73, - 0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e, - 0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed, - 0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46, - 0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8, - 0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32, - 0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95, - 0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7, - 0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a, - 0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7, - 0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95, - 0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c, - 0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06, - 0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00, -} - -func (m *RuncOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuncOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SystemdCgroup { - i-- - if m.SystemdCgroup { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.CriuPath) > 0 { - i -= len(m.CriuPath) - copy(dAtA[i:], m.CriuPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath))) - i-- - dAtA[i] = 0x1a - } - if len(m.RuntimeRoot) > 0 { - i -= len(m.RuntimeRoot) - copy(dAtA[i:], m.RuntimeRoot) - i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot))) - i-- - dAtA[i] = 0x12 - } - if len(m.Runtime) > 0 { - i -= len(m.Runtime) - copy(dAtA[i:], m.Runtime) - i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := 
len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.CriuImagePath) > 0 { - i -= len(m.CriuImagePath) - copy(dAtA[i:], m.CriuImagePath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath))) - i-- - dAtA[i] = 0x6a - } - if len(m.CriuWorkPath) > 0 { - i -= len(m.CriuWorkPath) - copy(dAtA[i:], m.CriuWorkPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuWorkPath))) - i-- - dAtA[i] = 0x62 - } - if m.IoGid != 0 { - i = encodeVarintRunc(dAtA, i, uint64(m.IoGid)) - i-- - dAtA[i] = 0x58 - } - if m.IoUid != 0 { - i = encodeVarintRunc(dAtA, i, uint64(m.IoUid)) - i-- - dAtA[i] = 0x50 - } - if len(m.ShimCgroup) > 0 { - i -= len(m.ShimCgroup) - copy(dAtA[i:], m.ShimCgroup) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ShimCgroup))) - i-- - dAtA[i] = 0x4a - } - if m.NoNewKeyring { - i-- - if m.NoNewKeyring { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 0x3a - } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.NoPivotRoot { - i-- - if m.NoPivotRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ImagePath) > 0 { - i -= len(m.ImagePath) - copy(dAtA[i:], m.ImagePath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath))) - i-- - dAtA[i] = 0x4a - } - if len(m.WorkPath) > 0 { - i -= len(m.WorkPath) - copy(dAtA[i:], m.WorkPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.WorkPath))) - i-- - dAtA[i] = 0x42 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 0x3a - } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - 
} - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Exit { - i-- - if m.Exit { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRunc(dAtA []byte, offset int, v uint64) int { - offset -= sovRunc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RuncOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Runtime) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.RuntimeRoot) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.CriuPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.SystemdCgroup { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NoPivotRoot { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovRunc(uint64(l)) - } - } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.NoNewKeyring { - n += 2 - } - l = len(m.ShimCgroup) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.IoUid != 0 { - n += 1 + sovRunc(uint64(m.IoUid)) - } - if m.IoGid != 0 { - n += 1 + sovRunc(uint64(m.IoGid)) - } - l = len(m.CriuWorkPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.CriuImagePath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exit { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovRunc(uint64(l)) - } - } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.WorkPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.ImagePath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} 
- -func (m *ProcessDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRunc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRunc(x uint64) (n int) { - return sovRunc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RuncOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuncOptions{`, - `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`, - `RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`, - `CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`, - `SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateOptions{`, - `NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`, - `ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`, - `IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`, - `IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`, - `CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`, - `CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointOptions{`, - `Exit:` + fmt.Sprintf("%v", this.Exit) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`, - `ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessDetails{`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringRunc(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RuncOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuncOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuncOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Runtime = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeRoot", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeRoot = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SystemdCgroup = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoPivotRoot = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - 
} - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoNewKeyring = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShimCgroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType) - } - m.IoUid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoUid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType) - } - m.IoGid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoGid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuWorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exit = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProcessDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRunc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRunc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRunc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRunc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 
0x65, 0x2f, 0x6c, 0x69, 0x6e, 0x75, 0x78, + 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2e, 0x72, 0x75, 0x6e, 0x63, 0x22, 0x92, 0x01, 0x0a, + 0x0b, 0x52, 0x75, 0x6e, 0x63, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x63, 0x72, 0x69, + 0x75, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x08, 0x63, 0x72, 0x69, 0x75, 0x50, 0x61, 0x74, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x64, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x43, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x22, 0xce, 0x03, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x6f, 0x5f, 0x70, 0x69, 0x76, 0x6f, 0x74, 0x5f, + 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x6f, 0x50, 0x69, + 0x76, 0x6f, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x6e, 0x5f, + 0x74, 0x63, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x54, + 0x63, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x55, 0x6e, 0x69, 0x78, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x6b, + 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x72, 0x69, 0x6e, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6e, 0x6f, 0x4e, 0x65, 0x77, 0x4b, 0x65, + 0x79, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x68, 0x69, 0x6d, + 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x75, 0x69, 0x64, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6f, 0x55, 0x69, 0x64, 0x12, 0x15, 0x0a, + 0x06, 0x69, 0x6f, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 
0x52, 0x05, 0x69, + 0x6f, 0x47, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, + 0x69, 0x75, 0x57, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x22, 0xbb, 0x02, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x69, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x65, 0x78, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x6f, 0x70, 0x65, 0x6e, 0x54, 0x63, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, 0x74, 0x68, + 0x22, 0x29, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x42, 0x44, 0x5a, 0x42, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2f, 0x72, 0x75, + 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x72, 0x75, 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRunc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRunc = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescOnce sync.Once + 
file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData = file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc ) + +func file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData) + }) + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData +} + +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes = []interface{}{ + (*RuncOptions)(nil), // 0: containerd.linux.runc.RuncOptions + (*CreateOptions)(nil), // 1: containerd.linux.runc.CreateOptions + (*CheckpointOptions)(nil), // 2: containerd.linux.runc.CheckpointOptions + (*ProcessDetails)(nil), // 3: containerd.linux.runc.ProcessDetails +} +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_init() } +func file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_init() { + if File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuncOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes, + DependencyIndexes: 
file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto = out.File + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc = nil + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes = nil + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto index 78e3abf4cb..5e2d675fbf 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto @@ -2,14 +2,18 @@ syntax = "proto3"; package containerd.linux.runc; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/runtime/linux/runctypes;runctypes"; message RuncOptions { string runtime = 1; string runtime_root = 2; - string criu_path = 3; + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + string criu_path = 3 [deprecated = true]; bool systemd_cgroup = 4; } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt index 7a29ff31c6..76478c5f67 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt @@ -1,7 +1,6 @@ file { name: "github.com/containerd/containerd/runtime/v2/runc/options/oci.proto" package: "containerd.runc.v1" - dependency: "gogoproto/gogo.proto" message_type { name: "Options" field { @@ -58,6 +57,9 @@ file { number: 8 label: LABEL_OPTIONAL type: TYPE_STRING + options { + deprecated: true + } json_name: "criuPath" } field { @@ -161,6 +163,5 @@ file { options { go_package: "github.com/containerd/containerd/runtime/v2/runc/options;options" } - weak_dependency: 0 syntax: "proto3" } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go index c9c44742a2..2d90df35cd 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go @@ -1,30 +1,30 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/runtime/v2/runc/options/oci.proto package options import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Options struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // disable pivot root when creating a container NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` // create a new keyring for the container @@ -39,52 +39,138 @@ type Options struct { BinaryName string `protobuf:"bytes,6,opt,name=binary_name,json=binaryName,proto3" json:"binary_name,omitempty"` // runc root directory Root string `protobuf:"bytes,7,opt,name=root,proto3" json:"root,omitempty"` - // criu binary path + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + // + // Deprecated: Do not use. CriuPath string `protobuf:"bytes,8,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` // enable systemd cgroups SystemdCgroup bool `protobuf:"varint,9,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` // criu image path CriuImagePath string `protobuf:"bytes,10,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` // criu work path - CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` } -func (m *Options) Reset() { *m = Options{} } -func (*Options) ProtoMessage() {} -func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{0} -} -func (m *Options) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Options.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Options) Reset() { + *x = Options{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Options) XXX_Merge(src proto.Message) { - xxx_messageInfo_Options.Merge(m, src) -} -func (m *Options) XXX_Size() int { - return m.Size() -} -func (m *Options) XXX_DiscardUnknown() { - xxx_messageInfo_Options.DiscardUnknown(m) + +func (x *Options) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Options proto.InternalMessageInfo +func (*Options) 
ProtoMessage() {} + +func (x *Options) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Options.ProtoReflect.Descriptor instead. +func (*Options) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{0} +} + +func (x *Options) GetNoPivotRoot() bool { + if x != nil { + return x.NoPivotRoot + } + return false +} + +func (x *Options) GetNoNewKeyring() bool { + if x != nil { + return x.NoNewKeyring + } + return false +} + +func (x *Options) GetShimCgroup() string { + if x != nil { + return x.ShimCgroup + } + return "" +} + +func (x *Options) GetIoUid() uint32 { + if x != nil { + return x.IoUid + } + return 0 +} + +func (x *Options) GetIoGid() uint32 { + if x != nil { + return x.IoGid + } + return 0 +} + +func (x *Options) GetBinaryName() string { + if x != nil { + return x.BinaryName + } + return "" +} + +func (x *Options) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +// Deprecated: Do not use. +func (x *Options) GetCriuPath() string { + if x != nil { + return x.CriuPath + } + return "" +} + +func (x *Options) GetSystemdCgroup() bool { + if x != nil { + return x.SystemdCgroup + } + return false +} + +func (x *Options) GetCriuImagePath() string { + if x != nil { + return x.CriuImagePath + } + return "" +} + +func (x *Options) GetCriuWorkPath() string { + if x != nil { + return x.CriuWorkPath + } + return "" +} type CheckpointOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // exit the container after a checkpoint Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` // checkpoint open tcp connections @@ -102,1357 +188,298 @@ type CheckpointOptions struct { // checkpoint image path ImagePath string `protobuf:"bytes,8,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` // checkpoint work path - WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` } -func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} } -func (*CheckpointOptions) ProtoMessage() {} -func (*CheckpointOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{1} -} -func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CheckpointOptions) Reset() { + *x = CheckpointOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CheckpointOptions) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_CheckpointOptions.Merge(m, src) -} -func (m *CheckpointOptions) XXX_Size() int { - return m.Size() -} -func (m *CheckpointOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointOptions.DiscardUnknown(m) + +func (x *CheckpointOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo +func (*CheckpointOptions) ProtoMessage() {} + +func (x *CheckpointOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointOptions.ProtoReflect.Descriptor instead. +func (*CheckpointOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{1} +} + +func (x *CheckpointOptions) GetExit() bool { + if x != nil { + return x.Exit + } + return false +} + +func (x *CheckpointOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp + } + return false +} + +func (x *CheckpointOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets + } + return false +} + +func (x *CheckpointOptions) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CheckpointOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks + } + return false +} + +func (x *CheckpointOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces + } + return nil +} + +func (x *CheckpointOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode + } + return "" +} + +func (x *CheckpointOptions) GetImagePath() string { + if x != nil { + return x.ImagePath + } + return "" +} + +func (x *CheckpointOptions) GetWorkPath() string { + if x != nil { + return x.WorkPath + } + return "" +} type ProcessDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // exec process id if the process is managed by a shim - ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *ProcessDetails) Reset() { + *x = ProcessDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessDetails) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } func (*ProcessDetails) ProtoMessage() {} + +func (x *ProcessDetails) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessDetails.ProtoReflect.Descriptor instead. 
func (*ProcessDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{2} -} -func (m *ProcessDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProcessDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessDetails.Merge(m, src) -} -func (m *ProcessDetails) XXX_Size() int { - return m.Size() -} -func (m *ProcessDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessDetails.DiscardUnknown(m) + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Options)(nil), "containerd.runc.v1.Options") - proto.RegisterType((*CheckpointOptions)(nil), "containerd.runc.v1.CheckpointOptions") - proto.RegisterType((*ProcessDetails)(nil), "containerd.runc.v1.ProcessDetails") +func (x *ProcessDetails) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptor_4e5440d739e9a863) -} +var File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto protoreflect.FileDescriptor -var fileDescriptor_4e5440d739e9a863 = []byte{ - // 587 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82, - 0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97, - 0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4, - 0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3, - 0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49, - 0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29, - 0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf, - 0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff, - 0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41, - 0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6, - 0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e, - 0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8, - 0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12, - 0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c, - 0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c, - 0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81, - 0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f, - 0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b, - 0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 
0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d, - 0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd, - 0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99, - 0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61, - 0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a, - 0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3, - 0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed, - 0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f, - 0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04, - 0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74, - 0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca, - 0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd, - 0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3, - 0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1, - 0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0, - 0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7, - 0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00, - 0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00, -} - -func (m *Options) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Options) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.CriuWorkPath) > 0 { - i -= len(m.CriuWorkPath) - copy(dAtA[i:], m.CriuWorkPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath))) - i-- - dAtA[i] = 0x5a - } - if len(m.CriuImagePath) > 0 { - i -= len(m.CriuImagePath) - copy(dAtA[i:], m.CriuImagePath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuImagePath))) - i-- - dAtA[i] = 0x52 - } - if m.SystemdCgroup { - i-- - if m.SystemdCgroup { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if len(m.CriuPath) > 0 { - i -= len(m.CriuPath) - copy(dAtA[i:], m.CriuPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuPath))) - i-- - dAtA[i] = 0x42 - } - if len(m.Root) > 0 { - i -= len(m.Root) - copy(dAtA[i:], m.Root) - i = encodeVarintOci(dAtA, i, uint64(len(m.Root))) - i-- - dAtA[i] = 0x3a - } - if len(m.BinaryName) > 0 { - i -= len(m.BinaryName) - copy(dAtA[i:], m.BinaryName) - i = encodeVarintOci(dAtA, i, uint64(len(m.BinaryName))) - i-- - dAtA[i] = 0x32 - } - if m.IoGid != 0 { - i = encodeVarintOci(dAtA, i, uint64(m.IoGid)) - i-- - dAtA[i] = 0x28 - } - if m.IoUid != 0 { - i = encodeVarintOci(dAtA, i, uint64(m.IoUid)) - i-- - dAtA[i] = 0x20 - } - if len(m.ShimCgroup) > 0 { - i -= len(m.ShimCgroup) - copy(dAtA[i:], m.ShimCgroup) - i = encodeVarintOci(dAtA, i, uint64(len(m.ShimCgroup))) - i-- - 
dAtA[i] = 0x1a - } - if m.NoNewKeyring { - i-- - if m.NoNewKeyring { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.NoPivotRoot { - i-- - if m.NoPivotRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.WorkPath) > 0 { - i -= len(m.WorkPath) - copy(dAtA[i:], m.WorkPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath))) - i-- - dAtA[i] = 0x4a - } - if len(m.ImagePath) > 0 { - i -= len(m.ImagePath) - copy(dAtA[i:], m.ImagePath) - i = encodeVarintOci(dAtA, i, uint64(len(m.ImagePath))) - i-- - dAtA[i] = 0x42 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintOci(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 0x3a - } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintOci(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Exit { - i-- - if m.Exit { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOci(dAtA []byte, offset int, v uint64) int { - offset -= sovOci(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Options) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NoPivotRoot { - n += 2 - } - if m.NoNewKeyring { - n += 2 - } - l = len(m.ShimCgroup) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.IoUid != 0 { - n += 1 + sovOci(uint64(m.IoUid)) - } 
- if m.IoGid != 0 { - n += 1 + sovOci(uint64(m.IoGid)) - } - l = len(m.BinaryName) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.Root) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.CriuPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.SystemdCgroup { - n += 2 - } - l = len(m.CriuImagePath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.CriuWorkPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exit { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovOci(uint64(l)) - } - } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.ImagePath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.WorkPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProcessDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovOci(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOci(x uint64) (n int) { - return sovOci(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Options) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Options{`, - `NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`, - `NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`, - `ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`, - `IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`, - `IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`, - `BinaryName:` + fmt.Sprintf("%v", this.BinaryName) + `,`, - `Root:` + fmt.Sprintf("%v", this.Root) + `,`, - `CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`, - `SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`, - `CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`, - `CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointOptions{`, - `Exit:` + fmt.Sprintf("%v", this.Exit) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`, - `WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessDetails{`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", 
this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringOci(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Options) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Options: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoPivotRoot = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoNewKeyring = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShimCgroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType) - } - m.IoUid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoUid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType) - } - m.IoGid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoGid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BinaryName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Root = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SystemdCgroup = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuWorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exit = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProcessDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOci(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOci - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOci - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOci - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 
0x32, 0x2f, 0x72, 0x75, + 0x6e, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x63, 0x69, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x72, 0x75, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x22, 0xed, 0x02, 0x0a, 0x07, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x6f, 0x5f, 0x70, 0x69, 0x76, 0x6f, 0x74, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x6f, 0x50, + 0x69, 0x76, 0x6f, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x6e, + 0x65, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x6e, 0x6f, 0x4e, 0x65, 0x77, 0x4b, 0x65, 0x79, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x68, 0x69, 0x6d, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, + 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x69, 0x6f, 0x55, 0x69, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x67, 0x69, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6f, 0x47, 0x69, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, + 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x63, 0x72, 0x69, 0x75, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x5f, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x64, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, 0x69, 0x75, + 0x57, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x22, 0xbb, 0x02, 0x0a, 0x11, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x65, 0x78, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x65, 0x78, + 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x54, 0x63, 0x70, 0x12, 0x32, 0x0a, + 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 
0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x22, 0x29, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, + 0x64, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOci = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOci = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData = file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc ) + +func file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData) + }) + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData +} + +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes = []interface{}{ + (*Options)(nil), // 0: containerd.runc.v1.Options + (*CheckpointOptions)(nil), // 1: containerd.runc.v1.CheckpointOptions + (*ProcessDetails)(nil), // 2: containerd.runc.v1.ProcessDetails +} +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_init() } +func 
file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_init() { + if File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Options); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto = out.File + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc = nil + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes = nil + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto index 6b4bcf462c..6270e99913 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package containerd.runc.v1; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/runtime/v2/runc/options;options"; message Options { @@ -21,8 +19,13 @@ message Options { string binary_name = 6; // runc root directory string root = 7; - // criu binary path - string criu_path = 8; + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. 
+ string criu_path = 8 [deprecated = true]; // enable systemd cgroups bool systemd_cgroup = 9; // criu image path diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go index ed1ebdd58b..5aa78a5d1b 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go @@ -25,8 +25,8 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/ttrpcutil" + "github.com/containerd/containerd/protobuf" "github.com/containerd/ttrpc" - "github.com/containerd/typeurl" "github.com/sirupsen/logrus" ) @@ -110,13 +110,13 @@ func (l *RemoteEventsPublisher) Publish(ctx context.Context, topic string, event if err != nil { return err } - any, err := typeurl.MarshalAny(event) + any, err := protobuf.MarshalAnyToProto(event) if err != nil { return err } i := &item{ ev: &v1.Envelope{ - Timestamp: time.Now(), + Timestamp: protobuf.ToTimestamp(time.Now()), Namespace: ns, Topic: topic, Event: any, diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index e5822cd921..cf006d8054 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -22,21 +22,23 @@ import ( "flag" "fmt" "io" + "net" "os" + "path/filepath" "runtime" "runtime/debug" - "strings" "time" + shimapi "github.com/containerd/containerd/api/runtime/task/v2" "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/shutdown" "github.com/containerd/containerd/plugin" - shimapi "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" - "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus" ) @@ -49,12 +51,22 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry - ContainerdBinary string + ContainerdBinary string // TODO(2.0): Remove ContainerdBinary, use the TTRPC_ADDRESS env to forward events Address string TTRPCAddress string Debug bool } +// BootstrapParams is a JSON payload returned in stdout from shim.Start call. +type BootstrapParams struct { + // Version is the version of shim parameters (expected 2 for shim v2) + Version int `json:"version"` + // Address is a address containerd should use to connect to shim. + Address string `json:"address"` + // Protocol is either TTRPC or GRPC. 
+ Protocol string `json:"protocol"` +} + type StopStatus struct { Pid int ExitStatus int @@ -135,6 +147,9 @@ var ( const ( ttrpcAddressEnv = "TTRPC_ADDRESS" + grpcAddressEnv = "GRPC_ADDRESS" + namespaceEnv = "NAMESPACE" + maxVersionEnv = "MAX_SHIM_VERSION" ) func parseFlags() { @@ -146,7 +161,9 @@ func parseFlags() { flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") - flag.StringVar(&containerdBinaryFlag, "publish-binary", "containerd", "path to publish binary (used for publishing events)") + flag.StringVar(&containerdBinaryFlag, "publish-binary", "", + fmt.Sprintf("path to publish binary (used for publishing events), but %s will ignore this flag, please use the %s env", os.Args[0], ttrpcAddressEnv), + ) flag.Parse() action = flag.Arg(0) @@ -176,7 +193,7 @@ func setLogger(ctx context.Context, id string) (context.Context, error) { l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { return ctx, err } l.Logger.SetOutput(f) @@ -224,11 +241,11 @@ func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error return StopStatus{ Pid: int(dr.Pid), ExitStatus: int(dr.ExitStatus), - ExitedAt: dr.ExitedAt, + ExitedAt: protobuf.FromTimestamp(dr.ExitedAt), }, nil } -// RunManager initialzes and runs a shim server +// RunManager initializes and runs a shim server. // TODO(2.0): Rename to Run func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config @@ -247,7 +264,7 @@ func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { - fmt.Printf("%s:\n", os.Args[0]) + fmt.Printf("%s:\n", filepath.Base(os.Args[0])) fmt.Println(" Version: ", version.Version) fmt.Println(" Revision:", version.Revision) fmt.Println(" Go version:", version.GoVersion) @@ -262,12 +279,12 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi setRuntime() signals, err := setupSignals(config) - if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err := subreaper(); err != nil { return err } } @@ -308,7 +325,10 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi // Handle explicit actions switch action { case "delete": - logger := log.G(ctx).WithFields(logrus.Fields{ + if debugFlag { + logrus.SetLevel(logrus.DebugLevel) + } + logger := log.G(ctx).WithFields(log.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) @@ -320,7 +340,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi data, err := proto.Marshal(&shimapi.DeleteResponse{ Pid: uint32(ss.Pid), ExitStatus: uint32(ss.ExitStatus), - ExitedAt: ss.ExitedAt, + ExitedAt: protobuf.ToTimestamp(ss.ExitedAt), }) if err != nil { return err @@ -331,10 +351,9 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi return nil case "start": opts := StartOpts{ - ContainerdBinary: containerdBinaryFlag, - Address: addressFlag, - TTRPCAddress: ttrpcAddress, - Debug: debugFlag, + Address: addressFlag, 
+ TTRPCAddress: ttrpcAddress, + Debug: debugFlag, } address, err := manager.Start(ctx, id, opts) @@ -447,7 +466,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi } } - if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { if err != shutdown.ErrShutdown { return err } @@ -479,17 +498,16 @@ func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, sh } l, err := serveListener(socketFlag) - if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { return err } go func() { defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { + if err := server.Serve(ctx, l); err != nil && !errors.Is(err, net.ErrClosed) { log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") } }() - logger := log.G(ctx).WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(log.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index e2dab0931e..90a7dbb072 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -72,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { } func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { - logger.Info("starting signal loop") + logger.Debug("starting signal loop") for { select { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 2add7ac33f..a290a06fb6 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -18,41 +18,41 @@ package shim import ( "context" - "errors" "io" "net" "os" + "github.com/containerd/containerd/errdefs" "github.com/containerd/ttrpc" "github.com/sirupsen/logrus" ) func setupSignals(config Config) (chan os.Signal, error) { - return nil, errors.New("not supported") + return nil, errdefs.ErrNotImplemented } func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { - return nil, errors.New("not supported") + return nil, errdefs.ErrNotImplemented } func subreaper() error { - return errors.New("not supported") + return errdefs.ErrNotImplemented } func setupDumpStacks(dump chan<- os.Signal) { } func serveListener(path string) (net.Listener, error) { - return nil, errors.New("not supported") + return nil, errdefs.ErrNotImplemented } func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { - return errors.New("not supported") + return errdefs.ErrNotImplemented } func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { } func openLog(ctx context.Context, _ string) (io.Writer, error) { - return nil, errors.New("not supported") + return nil, errdefs.ErrNotImplemented } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go 
b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 94c0f53978..fce1318a63 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "io" "net" "os" "path/filepath" @@ -28,12 +29,14 @@ import ( "time" "github.com/containerd/ttrpc" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" exec "golang.org/x/sys/execabs" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/atomicfile" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/containerd/protobuf/types" ) type CommandConfig struct { @@ -67,7 +70,10 @@ func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { cmd.Env = append( os.Environ(), "GOMAXPROCS=2", + fmt.Sprintf("%s=2", maxVersionEnv), fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), + fmt.Sprintf("%s=%s", grpcAddressEnv, config.Address), + fmt.Sprintf("%s=%s", namespaceEnv, ns), ) if config.SchedCore { cmd.Env = append(cmd.Env, "SCHED_CORE=1") @@ -88,7 +94,7 @@ func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { func BinaryName(runtime string) string { // runtime name should format like $prefix.name.version parts := strings.Split(runtime, ".") - if len(parts) < 2 { + if len(parts) < 2 || parts[0] == "" { return "" } @@ -169,6 +175,41 @@ func ReadAddress(path string) (string, error) { return string(data), nil } +// ReadRuntimeOptions reads config bytes from io.Reader and unmarshals it into the provided type. +// The type must be registered with typeurl. +// +// The function will return ErrNotFound, if the config is not provided. +// And ErrInvalidArgument, if unable to cast the config to the provided type T. +func ReadRuntimeOptions[T any](reader io.Reader) (T, error) { + var config T + + data, err := io.ReadAll(reader) + if err != nil { + return config, fmt.Errorf("failed to read config bytes from stdin: %w", err) + } + + if len(data) == 0 { + return config, errdefs.ErrNotFound + } + + var any types.Any + if err := proto.Unmarshal(data, &any); err != nil { + return config, err + } + + v, err := typeurl.UnmarshalAny(&any) + if err != nil { + return config, err + } + + config, ok := v.(T) + if !ok { + return config, fmt.Errorf("invalid type %T: %w", v, errdefs.ErrInvalidArgument) + } + + return config, nil +} + // chainUnaryServerInterceptors creates a single ttrpc server interceptor from // a chain of many interceptors executed from first to last. func chainUnaryServerInterceptors(interceptors ...ttrpc.UnaryServerInterceptor) ttrpc.UnaryServerInterceptor { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index 4e2309a806..ac470f914c 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. 
@@ -26,6 +25,7 @@ import ( "net" "os" "path/filepath" + "runtime" "strings" "syscall" "time" @@ -87,15 +87,20 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error // NewSocket returns a new socket func NewSocket(address string) (*net.UnixListener, error) { var ( - sock = socket(address) - path = sock.path() + sock = socket(address) + path = sock.path() + isAbstract = sock.isAbstract() + perm = os.FileMode(0600) ) - isAbstract := sock.isAbstract() + // Darwin needs +x to access socket, otherwise it'll fail with "bind: permission denied" when running as non-root. + if runtime.GOOS == "darwin" { + perm = 0700 + } if !isAbstract { - if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, fmt.Errorf("%s: %w", path, err) + if err := os.MkdirAll(filepath.Dir(path), perm); err != nil { + return nil, fmt.Errorf("mkdir failed for %s: %w", path, err) } } l, err := net.Listen("unix", path) @@ -104,12 +109,13 @@ func NewSocket(address string) (*net.UnixListener, error) { } if !isAbstract { - if err := os.Chmod(path, 0600); err != nil { + if err := os.Chmod(path, perm); err != nil { os.Remove(sock.path()) l.Close() - return nil, err + return nil, fmt.Errorf("chmod failed for %s: %w", path, err) } } + return l.(*net.UnixListener), nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go deleted file mode 100644 index 6366f9c57f..0000000000 --- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go +++ /dev/null @@ -1,7312 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/containerd/runtime/v2/task/shim.proto - -package task - -import ( - context "context" - fmt "fmt" - types "github.com/containerd/containerd/api/types" - task "github.com/containerd/containerd/api/types/task" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type CreateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` - Options *types1.Any `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } -func (*CreateTaskRequest) ProtoMessage() {} -func (*CreateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{0} -} -func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskRequest.Merge(m, src) -} -func (m *CreateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo - -type CreateTaskResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } -func (*CreateTaskResponse) ProtoMessage() {} -func (*CreateTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{1} -} -func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskResponse.Merge(m, src) -} -func (m *CreateTaskResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo - -type DeleteRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` 
- XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{2} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -type DeleteResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{3} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(m, src) -} -func (m *DeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo - -type ExecProcessRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - Spec *types1.Any `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{4} -} -func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessRequest.Merge(m, src) -} -func (m *ExecProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo - -type ExecProcessResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } -func (*ExecProcessResponse) ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{5} -} -func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessResponse.Merge(m, src) -} -func (m *ExecProcessResponse) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo - -type ResizePtyRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } -func (*ResizePtyRequest) ProtoMessage() {} -func (*ResizePtyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{6} -} -func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResizePtyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResizePtyRequest.Merge(m, src) -} -func (m *ResizePtyRequest) XXX_Size() int { - return m.Size() -} -func (m *ResizePtyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo - -type StateRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *StateRequest) Reset() { *m = StateRequest{} } -func (*StateRequest) ProtoMessage() {} -func (*StateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{7} -} -func (m *StateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateRequest.Merge(m, src) -} -func (m *StateRequest) XXX_Size() int { - return m.Size() -} -func (m *StateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StateRequest proto.InternalMessageInfo - -type StateResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - ExecID string `protobuf:"bytes,11,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StateResponse) Reset() { *m = StateResponse{} } -func (*StateResponse) ProtoMessage() {} -func (*StateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{8} -} -func (m *StateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateResponse.Merge(m, src) -} -func (m *StateResponse) XXX_Size() int { - return m.Size() -} -func (m *StateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StateResponse proto.InternalMessageInfo - -type KillRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KillRequest) Reset() { *m = KillRequest{} 
} -func (*KillRequest) ProtoMessage() {} -func (*KillRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{9} -} -func (m *KillRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KillRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_KillRequest.Merge(m, src) -} -func (m *KillRequest) XXX_Size() int { - return m.Size() -} -func (m *KillRequest) XXX_DiscardUnknown() { - xxx_messageInfo_KillRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_KillRequest proto.InternalMessageInfo - -type CloseIORequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } -func (*CloseIORequest) ProtoMessage() {} -func (*CloseIORequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{10} -} -func (m *CloseIORequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CloseIORequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseIORequest.Merge(m, src) -} -func (m *CloseIORequest) XXX_Size() int { - return m.Size() -} -func (m *CloseIORequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseIORequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo - -type PidsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsRequest) Reset() { *m = PidsRequest{} } -func (*PidsRequest) ProtoMessage() {} -func (*PidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{11} -} -func (m *PidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsRequest.Merge(m, src) -} -func (m *PidsRequest) XXX_Size() int { - return m.Size() -} -func (m *PidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsRequest proto.InternalMessageInfo - -type PidsResponse struct { - Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *PidsResponse) Reset() { *m = PidsResponse{} } -func (*PidsResponse) ProtoMessage() {} -func (*PidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{12} -} -func (m *PidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsResponse.Merge(m, src) -} -func (m *PidsResponse) XXX_Size() int { - return m.Size() -} -func (m *PidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsResponse proto.InternalMessageInfo - -type CheckpointTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - Options *types1.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } -func (*CheckpointTaskRequest) ProtoMessage() {} -func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{13} -} -func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskRequest.Merge(m, src) -} -func (m *CheckpointTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CheckpointTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo - -type UpdateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resources *types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } -func (*UpdateTaskRequest) ProtoMessage() {} -func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{14} -} -func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateTaskRequest) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateTaskRequest.Merge(m, src) -} -func (m *UpdateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo - -type StartRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartRequest) Reset() { *m = StartRequest{} } -func (*StartRequest) ProtoMessage() {} -func (*StartRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{15} -} -func (m *StartRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StartRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartRequest.Merge(m, src) -} -func (m *StartRequest) XXX_Size() int { - return m.Size() -} -func (m *StartRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartRequest proto.InternalMessageInfo - -type StartResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartResponse) Reset() { *m = StartResponse{} } -func (*StartResponse) ProtoMessage() {} -func (*StartResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{16} -} -func (m *StartResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StartResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartResponse.Merge(m, src) -} -func (m *StartResponse) XXX_Size() int { - return m.Size() -} -func (m *StartResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartResponse proto.InternalMessageInfo - -type WaitRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WaitRequest) Reset() { *m = WaitRequest{} } -func (*WaitRequest) ProtoMessage() {} -func (*WaitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{17} -} -func (m *WaitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) 
- if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WaitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitRequest.Merge(m, src) -} -func (m *WaitRequest) XXX_Size() int { - return m.Size() -} -func (m *WaitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WaitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitRequest proto.InternalMessageInfo - -type WaitResponse struct { - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WaitResponse) Reset() { *m = WaitResponse{} } -func (*WaitResponse) ProtoMessage() {} -func (*WaitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{18} -} -func (m *WaitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WaitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitResponse.Merge(m, src) -} -func (m *WaitResponse) XXX_Size() int { - return m.Size() -} -func (m *WaitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WaitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitResponse proto.InternalMessageInfo - -type StatsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{19} -} -func (m *StatsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsRequest.Merge(m, src) -} -func (m *StatsRequest) XXX_Size() int { - return m.Size() -} -func (m *StatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsRequest proto.InternalMessageInfo - -type StatsResponse struct { - Stats *types1.Any `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{20} -} -func (m *StatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsResponse.Merge(m, src) -} -func (m *StatsResponse) XXX_Size() int { - return m.Size() -} -func (m *StatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsResponse proto.InternalMessageInfo - -type ConnectRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{21} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(m, src) -} -func (m *ConnectRequest) XXX_Size() int { - return m.Size() -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -type ConnectResponse struct { - ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` - TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } -func (*ConnectResponse) ProtoMessage() {} -func (*ConnectResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{22} -} -func (m *ConnectResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConnectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConnectResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConnectResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectResponse.Merge(m, src) -} -func (m *ConnectResponse) XXX_Size() int { - return m.Size() -} -func (m *ConnectResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectResponse proto.InternalMessageInfo - -type ShutdownRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } -func (*ShutdownRequest) ProtoMessage() {} -func (*ShutdownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{23} -} -func (m *ShutdownRequest) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ShutdownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutdownRequest.Merge(m, src) -} -func (m *ShutdownRequest) XXX_Size() int { - return m.Size() -} -func (m *ShutdownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutdownRequest proto.InternalMessageInfo - -type PauseRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PauseRequest) Reset() { *m = PauseRequest{} } -func (*PauseRequest) ProtoMessage() {} -func (*PauseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{24} -} -func (m *PauseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PauseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PauseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PauseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PauseRequest.Merge(m, src) -} -func (m *PauseRequest) XXX_Size() int { - return m.Size() -} -func (m *PauseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PauseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PauseRequest proto.InternalMessageInfo - -type ResumeRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResumeRequest) Reset() { *m = ResumeRequest{} } -func (*ResumeRequest) ProtoMessage() {} -func (*ResumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{25} -} -func (m *ResumeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResumeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResumeRequest.Merge(m, src) -} -func (m *ResumeRequest) XXX_Size() int { - return m.Size() -} -func (m *ResumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResumeRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CreateTaskRequest)(nil), "containerd.task.v2.CreateTaskRequest") - proto.RegisterType((*CreateTaskResponse)(nil), "containerd.task.v2.CreateTaskResponse") - proto.RegisterType((*DeleteRequest)(nil), "containerd.task.v2.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "containerd.task.v2.DeleteResponse") - proto.RegisterType((*ExecProcessRequest)(nil), "containerd.task.v2.ExecProcessRequest") - proto.RegisterType((*ExecProcessResponse)(nil), "containerd.task.v2.ExecProcessResponse") - 
proto.RegisterType((*ResizePtyRequest)(nil), "containerd.task.v2.ResizePtyRequest") - proto.RegisterType((*StateRequest)(nil), "containerd.task.v2.StateRequest") - proto.RegisterType((*StateResponse)(nil), "containerd.task.v2.StateResponse") - proto.RegisterType((*KillRequest)(nil), "containerd.task.v2.KillRequest") - proto.RegisterType((*CloseIORequest)(nil), "containerd.task.v2.CloseIORequest") - proto.RegisterType((*PidsRequest)(nil), "containerd.task.v2.PidsRequest") - proto.RegisterType((*PidsResponse)(nil), "containerd.task.v2.PidsResponse") - proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.task.v2.CheckpointTaskRequest") - proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.task.v2.UpdateTaskRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.task.v2.UpdateTaskRequest.AnnotationsEntry") - proto.RegisterType((*StartRequest)(nil), "containerd.task.v2.StartRequest") - proto.RegisterType((*StartResponse)(nil), "containerd.task.v2.StartResponse") - proto.RegisterType((*WaitRequest)(nil), "containerd.task.v2.WaitRequest") - proto.RegisterType((*WaitResponse)(nil), "containerd.task.v2.WaitResponse") - proto.RegisterType((*StatsRequest)(nil), "containerd.task.v2.StatsRequest") - proto.RegisterType((*StatsResponse)(nil), "containerd.task.v2.StatsResponse") - proto.RegisterType((*ConnectRequest)(nil), "containerd.task.v2.ConnectRequest") - proto.RegisterType((*ConnectResponse)(nil), "containerd.task.v2.ConnectResponse") - proto.RegisterType((*ShutdownRequest)(nil), "containerd.task.v2.ShutdownRequest") - proto.RegisterType((*PauseRequest)(nil), "containerd.task.v2.PauseRequest") - proto.RegisterType((*ResumeRequest)(nil), "containerd.task.v2.ResumeRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/v2/task/shim.proto", fileDescriptor_9202ee34bc3ad8ca) -} - -var fileDescriptor_9202ee34bc3ad8ca = []byte{ - // 1306 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x4d, 0x6f, 0xdb, 0x46, - 0x13, 0x0e, 0xf5, 0x41, 0x49, 0xa3, 0xc8, 0x71, 0xf6, 0x75, 0xf2, 0x32, 0x0a, 0x20, 0x29, 0x4c, - 0x93, 0xaa, 0x2d, 0x40, 0xa1, 0x0a, 0x1a, 0x14, 0x31, 0x90, 0xc2, 0x76, 0xdc, 0x40, 0x4d, 0x5a, - 0x1b, 0x4c, 0x8a, 0x04, 0xbd, 0x18, 0xb4, 0xb8, 0x91, 0x08, 0x4b, 0x5c, 0x96, 0xbb, 0x74, 0xa2, - 0x02, 0x05, 0x7a, 0xea, 0xa1, 0xa7, 0xfe, 0xac, 0x1c, 0x0b, 0xf4, 0xd2, 0x4b, 0xd3, 0x46, 0xff, - 0xa0, 0xc7, 0xde, 0x8a, 0xfd, 0x90, 0x45, 0x49, 0xa4, 0x14, 0x07, 0xba, 0x18, 0x3b, 0xdc, 0x67, - 0x67, 0x67, 0x67, 0x9f, 0x79, 0x66, 0x65, 0xd8, 0xee, 0x79, 0xac, 0x1f, 0x1d, 0x5b, 0x5d, 0x32, - 0x6c, 0x75, 0x89, 0xcf, 0x1c, 0xcf, 0xc7, 0xa1, 0x1b, 0x1f, 0x86, 0x91, 0xcf, 0xbc, 0x21, 0x6e, - 0x9d, 0xb6, 0x5b, 0xcc, 0xa1, 0x27, 0x2d, 0xda, 0xf7, 0x86, 0x56, 0x10, 0x12, 0x46, 0x10, 0x9a, - 0xc2, 0x2c, 0x3e, 0x67, 0x9d, 0xb6, 0xab, 0xd7, 0x7a, 0x84, 0xf4, 0x06, 0xb8, 0x25, 0x10, 0xc7, - 0xd1, 0x8b, 0x96, 0xe3, 0x8f, 0x24, 0xbc, 0x7a, 0x7d, 0x7e, 0x0a, 0x0f, 0x03, 0x36, 0x99, 0xdc, - 0xea, 0x91, 0x1e, 0x11, 0xc3, 0x16, 0x1f, 0xa9, 0xaf, 0xf5, 0xf9, 0x25, 0x3c, 0x14, 0xca, 0x9c, - 0x61, 0xa0, 0x00, 0x77, 0x57, 0xc6, 0xef, 0x04, 0x5e, 0x8b, 0x8d, 0x02, 0x4c, 0x5b, 0x43, 0x12, - 0xf9, 0x4c, 0xad, 0xbb, 0x77, 0x8e, 0x75, 0xe2, 0xd8, 0xe2, 0x7c, 0x62, 0xad, 0xf9, 0x7b, 0x06, - 0x2e, 0xef, 0x85, 0xd8, 0x61, 0xf8, 0xa9, 0x43, 0x4f, 0x6c, 0xfc, 0x7d, 0x84, 0x29, 0x43, 0x57, - 0x21, 0xe3, 0xb9, 0x86, 0xd6, 0xd0, 0x9a, 0xa5, 0x5d, 0x7d, 0xfc, 0xa6, 0x9e, 0xe9, 0x3c, 0xb0, - 0x33, 0x9e, 0x8b, 0xae, 
0x82, 0x7e, 0x1c, 0xf9, 0xee, 0x00, 0x1b, 0x19, 0x3e, 0x67, 0x2b, 0x0b, - 0xb5, 0x40, 0x0f, 0x09, 0x61, 0x2f, 0xa8, 0x91, 0x6d, 0x64, 0x9b, 0xe5, 0xf6, 0xff, 0xad, 0x78, - 0x36, 0xf9, 0xc6, 0xd6, 0xd7, 0x3c, 0x60, 0x5b, 0xc1, 0x50, 0x15, 0x8a, 0x0c, 0x87, 0x43, 0xcf, - 0x77, 0x06, 0x46, 0xae, 0xa1, 0x35, 0x8b, 0xf6, 0x99, 0x8d, 0xb6, 0x20, 0x4f, 0x99, 0xeb, 0xf9, - 0x46, 0x5e, 0xec, 0x21, 0x0d, 0xbe, 0x35, 0x65, 0x2e, 0x89, 0x98, 0xa1, 0xcb, 0xad, 0xa5, 0xa5, - 0xbe, 0xe3, 0x30, 0x34, 0x0a, 0x67, 0xdf, 0x71, 0x18, 0xa2, 0x1a, 0x40, 0xb7, 0x8f, 0xbb, 0x27, - 0x01, 0xf1, 0x7c, 0x66, 0x14, 0xc5, 0x5c, 0xec, 0x0b, 0xfa, 0x04, 0x2e, 0x07, 0x4e, 0x88, 0x7d, - 0x76, 0x14, 0x83, 0x95, 0x04, 0x6c, 0x53, 0x4e, 0xec, 0x4d, 0xc1, 0x16, 0x14, 0x48, 0xc0, 0x3c, - 0xe2, 0x53, 0x03, 0x1a, 0x5a, 0xb3, 0xdc, 0xde, 0xb2, 0xe4, 0x65, 0x5a, 0x93, 0xcb, 0xb4, 0x76, - 0xfc, 0x91, 0x3d, 0x01, 0x99, 0xb7, 0x01, 0xc5, 0x93, 0x4a, 0x03, 0xe2, 0x53, 0x8c, 0x36, 0x21, - 0x1b, 0xa8, 0xb4, 0x56, 0x6c, 0x3e, 0x34, 0x1f, 0x43, 0xe5, 0x01, 0x1e, 0x60, 0x86, 0x57, 0x25, - 0xfe, 0x26, 0x14, 0xf0, 0x2b, 0xdc, 0x3d, 0xf2, 0x5c, 0x99, 0xf9, 0x5d, 0x18, 0xbf, 0xa9, 0xeb, - 0xfb, 0xaf, 0x70, 0xb7, 0xf3, 0xc0, 0xd6, 0xf9, 0x54, 0xc7, 0x35, 0x7f, 0xd6, 0x60, 0x63, 0xe2, - 0x2e, 0x6d, 0x4b, 0x54, 0x87, 0x32, 0x7e, 0xe5, 0xb1, 0x23, 0xca, 0x1c, 0x16, 0x51, 0xe1, 0xad, - 0x62, 0x03, 0xff, 0xf4, 0x44, 0x7c, 0x41, 0x3b, 0x50, 0xe2, 0x16, 0x76, 0x8f, 0x1c, 0x66, 0x64, - 0xc5, 0x69, 0xab, 0x0b, 0xa7, 0x7d, 0x3a, 0xa1, 0xee, 0x6e, 0xf1, 0xf5, 0x9b, 0xfa, 0x85, 0x5f, - 0xff, 0xaa, 0x6b, 0x76, 0x51, 0x2e, 0xdb, 0x61, 0xe6, 0x9f, 0x1a, 0x20, 0x1e, 0xdb, 0x61, 0x48, - 0xba, 0x98, 0xd2, 0x75, 0x1c, 0x6e, 0x86, 0x31, 0xd9, 0x34, 0xc6, 0xe4, 0x92, 0x19, 0x93, 0x4f, - 0x61, 0x8c, 0x3e, 0xc3, 0x98, 0x26, 0xe4, 0x68, 0x80, 0xbb, 0x82, 0x47, 0x69, 0x37, 0x2c, 0x10, - 0xe6, 0x15, 0xf8, 0xdf, 0xcc, 0xf1, 0x64, 0xb2, 0xcd, 0x1f, 0x61, 0xd3, 0xc6, 0xd4, 0xfb, 0x01, - 0x1f, 0xb2, 0xd1, 0x5a, 0xce, 0xbc, 0x05, 0xf9, 0x97, 0x9e, 0xcb, 0xfa, 0xe2, 0xc0, 0x15, 0x5b, - 0x1a, 0x3c, 0xfe, 0x3e, 0xf6, 0x7a, 0x7d, 0x26, 0x8e, 0x5b, 0xb1, 0x95, 0x65, 0x3e, 0x82, 0x8b, - 0xfc, 0x0a, 0xd7, 0xc3, 0xa5, 0x7f, 0x32, 0x50, 0x51, 0xde, 0x14, 0x95, 0xce, 0xab, 0x09, 0x8a, - 0x7a, 0xd9, 0x29, 0xf5, 0xee, 0xf0, 0xc4, 0x0b, 0xd6, 0xf1, 0xc0, 0x37, 0xda, 0xd7, 0xe3, 0x2a, - 0x71, 0xfa, 0xa9, 0x12, 0x0a, 0x49, 0x43, 0x5b, 0x41, 0xd7, 0xa4, 0x06, 0x71, 0xf6, 0x14, 0xe7, - 0xd8, 0x33, 0x57, 0x11, 0xa5, 0xe5, 0x15, 0x01, 0xef, 0x53, 0x11, 0xf1, 0x9c, 0x97, 0x53, 0x73, - 0xce, 0xa0, 0xfc, 0xc8, 0x1b, 0x0c, 0xd6, 0x42, 0x1d, 0x9e, 0x08, 0xaf, 0x37, 0x29, 0x96, 0x8a, - 0xad, 0x2c, 0x7e, 0x2b, 0xce, 0x60, 0xa2, 0xb9, 0x7c, 0x68, 0x76, 0x61, 0x63, 0x6f, 0x40, 0x28, - 0xee, 0x1c, 0xac, 0x8b, 0xb3, 0xf2, 0xbe, 0x64, 0x91, 0x4a, 0xc3, 0xbc, 0x05, 0xe5, 0x43, 0xcf, - 0x5d, 0xa5, 0x04, 0xe6, 0x37, 0x70, 0x51, 0xc2, 0x14, 0xe7, 0xee, 0x43, 0x29, 0x90, 0x45, 0x86, - 0xa9, 0xa1, 0x89, 0xd6, 0xd2, 0x48, 0x24, 0x8d, 0x2a, 0xc5, 0x8e, 0xff, 0x82, 0xd8, 0xd3, 0x25, - 0x26, 0x85, 0x2b, 0x53, 0x15, 0x7f, 0x97, 0x06, 0x87, 0x20, 0x17, 0x38, 0xac, 0xaf, 0xa8, 0x2c, - 0xc6, 0x71, 0xf1, 0xcf, 0xbe, 0x8b, 0xf8, 0xff, 0xab, 0xc1, 0xe5, 0x6f, 0x03, 0xf7, 0x1d, 0x5b, - 0x6a, 0x1b, 0x4a, 0x21, 0xa6, 0x24, 0x0a, 0xbb, 0x58, 0xaa, 0x71, 0x9a, 0xff, 0x29, 0x0c, 0x3d, - 0x87, 0xb2, 0xe3, 0xfb, 0x84, 0x39, 0x93, 0xa8, 0x78, 0x62, 0xee, 0x5a, 0x8b, 0x2f, 0x18, 0x6b, - 0x21, 0x0e, 0x6b, 0x67, 0xba, 0x70, 0xdf, 0x67, 0xe1, 0xc8, 0x8e, 0xbb, 0xaa, 0xde, 0x87, 0xcd, - 0x79, 0x00, 0xa7, 0xcc, 0x09, 0x1e, 0xc9, 0xd0, 
0x6d, 0x3e, 0xe4, 0x77, 0x7c, 0xea, 0x0c, 0xa2, - 0x49, 0xc5, 0x4b, 0xe3, 0x5e, 0xe6, 0x73, 0x4d, 0x69, 0x50, 0xc8, 0xd6, 0xa2, 0x41, 0x37, 0x84, - 0x04, 0x71, 0x67, 0xa9, 0x0d, 0xf4, 0x2b, 0x28, 0x3f, 0x73, 0xbc, 0xf5, 0x6c, 0x17, 0xc2, 0x45, - 0xe9, 0x4b, 0xed, 0x36, 0xa7, 0x0b, 0xda, 0x72, 0x5d, 0xc8, 0xbc, 0x57, 0xa7, 0xbc, 0x2d, 0x35, - 0x7b, 0x65, 0x61, 0x6c, 0x4b, 0x35, 0x9e, 0x56, 0xc6, 0xc7, 0xbc, 0xcc, 0x1c, 0x26, 0xc3, 0x4a, - 0xa3, 0x8c, 0x84, 0x98, 0x4d, 0xd8, 0xd8, 0x23, 0xbe, 0x8f, 0xbb, 0xab, 0xf2, 0x64, 0x3a, 0x70, - 0xe9, 0x0c, 0xa9, 0x36, 0xba, 0x06, 0x45, 0xfe, 0x4a, 0x3e, 0x9a, 0x26, 0xbe, 0xc0, 0xed, 0x43, - 0xcf, 0xe5, 0x53, 0x9c, 0x67, 0x62, 0x4a, 0xbe, 0x23, 0x0a, 0xdc, 0xe6, 0x53, 0x06, 0x14, 0x4e, - 0x71, 0x48, 0x3d, 0x22, 0x75, 0xa0, 0x64, 0x4f, 0x4c, 0x73, 0x1b, 0x2e, 0x3d, 0xe9, 0x47, 0xcc, - 0x25, 0x2f, 0xfd, 0x55, 0xb7, 0xb6, 0x09, 0x59, 0x9f, 0xbc, 0x14, 0xae, 0x8b, 0x36, 0x1f, 0xf2, - 0x74, 0x1d, 0x3a, 0x11, 0x5d, 0xd5, 0xe2, 0xcc, 0x0f, 0xa1, 0x62, 0x63, 0x1a, 0x0d, 0x57, 0x01, - 0xdb, 0xbf, 0x00, 0xe4, 0x78, 0x75, 0xa0, 0xc7, 0x90, 0x17, 0xed, 0x0e, 0x35, 0x92, 0xca, 0x28, - 0xde, 0x57, 0xab, 0x37, 0x96, 0x20, 0x54, 0xd2, 0x9e, 0x81, 0x2e, 0xdf, 0x7f, 0xe8, 0x56, 0x12, - 0x78, 0xe1, 0xc1, 0x5d, 0xbd, 0xbd, 0x0a, 0xa6, 0x1c, 0xcb, 0x30, 0x43, 0x96, 0x1a, 0xe6, 0x59, - 0xe9, 0xa5, 0x86, 0x19, 0xab, 0xa7, 0x03, 0xd0, 0xe5, 0x7b, 0x11, 0x25, 0x82, 0x67, 0x9e, 0xa6, - 0x55, 0x73, 0x19, 0x44, 0x39, 0xec, 0x40, 0x8e, 0xeb, 0x37, 0xaa, 0x27, 0x61, 0x63, 0x0d, 0xa0, - 0xda, 0x48, 0x07, 0x28, 0x57, 0x3b, 0x90, 0x17, 0x57, 0x9d, 0x7c, 0xd2, 0x38, 0x0b, 0xaa, 0x57, - 0x17, 0xc8, 0xbf, 0xcf, 0x7f, 0x8c, 0xa1, 0x3d, 0xd0, 0x25, 0x0b, 0x92, 0x8f, 0x37, 0xc3, 0x90, - 0x54, 0x27, 0x07, 0x00, 0xb1, 0x1f, 0x02, 0x1f, 0x25, 0xde, 0x53, 0x52, 0x8b, 0x49, 0x75, 0xf8, - 0x05, 0xe4, 0x78, 0x97, 0x4f, 0xce, 0x51, 0xac, 0xff, 0xa7, 0x3a, 0xf8, 0x12, 0x72, 0x5c, 0xb9, - 0x50, 0x22, 0x67, 0x16, 0x9f, 0xdd, 0xa9, 0x7e, 0x3a, 0x50, 0x3a, 0x7b, 0xae, 0xa2, 0x0f, 0x52, - 0x32, 0x34, 0xf3, 0x9a, 0x4d, 0x75, 0xb5, 0x0f, 0x05, 0xf5, 0x86, 0x40, 0x89, 0x34, 0x99, 0x7d, - 0x60, 0xa4, 0xba, 0x79, 0x08, 0xba, 0x6c, 0x58, 0xc9, 0x65, 0xb3, 0xd0, 0xcc, 0x96, 0x1c, 0x2d, - 0xc7, 0xa5, 0x3c, 0x39, 0xc7, 0xb1, 0x86, 0x91, 0xcc, 0xc3, 0x99, 0x2e, 0xa0, 0x84, 0x81, 0xa6, - 0x0b, 0x03, 0x5d, 0x29, 0x0c, 0x53, 0x56, 0xdb, 0x50, 0x50, 0x02, 0x9b, 0x92, 0xa8, 0x19, 0x9d, - 0xae, 0xde, 0x5c, 0x8a, 0x51, 0x3e, 0x1f, 0x42, 0x71, 0xa2, 0xa8, 0x28, 0x71, 0xc1, 0x9c, 0xde, - 0xa6, 0x65, 0x6d, 0xf7, 0xe0, 0xf5, 0xdb, 0xda, 0x85, 0x3f, 0xde, 0xd6, 0x2e, 0xfc, 0x34, 0xae, - 0x69, 0xaf, 0xc7, 0x35, 0xed, 0xb7, 0x71, 0x4d, 0xfb, 0x7b, 0x5c, 0xd3, 0xbe, 0xfb, 0xec, 0xbc, - 0xff, 0x59, 0xd9, 0xe6, 0x7f, 0x9e, 0x67, 0x8e, 0x75, 0xb1, 0xc5, 0x9d, 0xff, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xd3, 0xbf, 0xc3, 0xa9, 0x9b, 0x11, 0x00, 0x00, -} - -func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { 
- return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if len(m.ParentCheckpoint) > 0 { - i -= len(m.ParentCheckpoint) - copy(dAtA[i:], m.ParentCheckpoint) - i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint))) - i-- - dAtA[i] = 0x4a - } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x42 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteResponse) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintShim(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x10 - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x2a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x22 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Height != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Width != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Width)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x5a - } - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintShim(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x52 - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x48 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Status != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintShim(dAtA, 
i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *KillRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Signal != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Signal)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stdin { - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - 
_ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Processes) > 0 { - for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintShim(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintShim(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintShim(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintShim(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if m.Resources != nil { - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= 
len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WaitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WaitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintShim(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StatsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, 
uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ConnectRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConnectRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConnectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ConnectResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConnectResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConnectResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintShim(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if m.TaskPid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.TaskPid)) - i-- - dAtA[i] = 0x10 - } - if m.ShimPid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ShimPid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ShutdownRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Now { - i-- - if m.Now { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PauseRequest) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PauseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResumeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintShim(dAtA []byte, offset int, v uint64) int { - offset -= sovShim(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CreateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovShim(uint64(l)) - } - } - if m.Terminal { - n += 2 - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ParentCheckpoint) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*ExecProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Terminal { - n += 2 - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResizePtyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Width != 0 { - n += 1 + sovShim(uint64(m.Width)) - } - if m.Height != 0 { - n += 1 + sovShim(uint64(m.Height)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.Status != 0 { - n += 1 + sovShim(uint64(m.Status)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *KillRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Signal != 0 { - n += 1 + sovShim(uint64(m.Signal)) - } - if m.All { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CloseIORequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Stdin { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int 
- _ = l - if len(m.Processes) > 0 { - for _, e := range m.Processes { - l = e.Size() - n += 1 + l + sovShim(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Resources != nil { - l = m.Resources.Size() - n += 1 + l + sovShim(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovShim(uint64(len(k))) + 1 + len(v) + sovShim(uint64(len(v))) - n += mapEntrySize + 1 + sovShim(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConnectRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConnectResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ShimPid != 0 { - n += 1 + sovShim(uint64(m.ShimPid)) - } - if m.TaskPid != 0 { - n += 1 + sovShim(uint64(m.TaskPid)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ShutdownRequest) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Now { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PauseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResumeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovShim(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozShim(x uint64) (n int) { - return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CreateTaskRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRootfs += "}" - s := strings.Join([]string{`&CreateTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateTaskResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateTaskResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", 
"types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResizePtyRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResizePtyRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Width:` + fmt.Sprintf("%v", this.Width) + `,`, - `Height:` + fmt.Sprintf("%v", this.Height) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StateRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StateResponse{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *KillRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KillRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `All:` + fmt.Sprintf("%v", this.All) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CloseIORequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CloseIORequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForProcesses := "[]*ProcessInfo{" - for _, f := range this.Processes { - repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + "," - } - repeatedStringForProcesses += "}" - s := strings.Join([]string{`&PidsResponse{`, - `Processes:` + repeatedStringForProcesses + `,`, - `XXX_unrecognized:` + 
fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateTaskRequest) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&UpdateTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitResponse{`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatsRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConnectRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConnectRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + 
fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConnectResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConnectResponse{`, - `ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`, - `TaskPid:` + fmt.Sprintf("%v", this.TaskPid) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ShutdownRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ShutdownRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Now:` + fmt.Sprintf("%v", this.Now) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PauseRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PauseRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResumeRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResumeRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringShim(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type TaskService interface { - State(ctx context.Context, req *StateRequest) (*StateResponse, error) - Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) - Start(ctx context.Context, req *StartRequest) (*StartResponse, error) - Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) - Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) - Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error) - Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error) - Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) - Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) - Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) - ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) - CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) - Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) - Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) - Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) - Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) - Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error) -} - -func RegisterTaskService(srv *github_com_containerd_ttrpc.Server, svc TaskService) { - srv.Register("containerd.task.v2.Task", map[string]github_com_containerd_ttrpc.Method{ - "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StateRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.State(ctx, &req) - }, - "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CreateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Create(ctx, &req) - }, - "Start": func(ctx context.Context, unmarshal func(interface{}) error) 
(interface{}, error) { - var req StartRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Start(ctx, &req) - }, - "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req DeleteRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Delete(ctx, &req) - }, - "Pids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PidsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Pids(ctx, &req) - }, - "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PauseRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Pause(ctx, &req) - }, - "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResumeRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Resume(ctx, &req) - }, - "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CheckpointTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Checkpoint(ctx, &req) - }, - "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req KillRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Kill(ctx, &req) - }, - "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ExecProcessRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Exec(ctx, &req) - }, - "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResizePtyRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ResizePty(ctx, &req) - }, - "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CloseIORequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.CloseIO(ctx, &req) - }, - "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req UpdateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Update(ctx, &req) - }, - "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req WaitRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Wait(ctx, &req) - }, - "Stats": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StatsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Stats(ctx, &req) - }, - "Connect": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ConnectRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Connect(ctx, &req) - }, - "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ShutdownRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Shutdown(ctx, &req) - }, - }) -} - -type taskClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewTaskClient(client *github_com_containerd_ttrpc.Client) TaskService { - return &taskClient{ - client: client, - } -} - -func (c *taskClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { - 
var resp StateResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "State", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { - var resp CreateTaskResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Create", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { - var resp StartResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Start", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { - var resp DeleteResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Delete", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) { - var resp PidsResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pids", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { - var resp WaitResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Wait", req, &resp); err != nil { - return nil, err - } - 
return &resp, nil -} - -func (c *taskClient) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { - var resp StatsResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Stats", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) { - var resp ConnectResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Connect", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ParentCheckpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break 
- } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &types1.Any{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) - } - m.Width = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Width |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= task.Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KillRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloseIORequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Processes = append(m.Processes, &task.ProcessInfo{}) - if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resources == nil { - m.Resources = &types1.Any{} - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey 
< 0 { - return ErrInvalidLengthShim - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthShim - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthShim - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthShim - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &types1.Any{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConnectRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConnectResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimPid", wireType) - } - m.ShimPid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShimPid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TaskPid", wireType) - } - m.TaskPid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TaskPid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Now", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Now = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PauseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PauseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PauseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResumeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResumeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipShim(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthShim - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupShim - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthShim - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowShim = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupShim = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/sandbox.go b/vendor/github.com/containerd/containerd/sandbox.go new file mode 100644 index 0000000000..2e46da9f32 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox.go @@ -0,0 +1,247 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf/types" + api "github.com/containerd/containerd/sandbox" + "github.com/containerd/typeurl/v2" +) + +// Sandbox is a high level client to containerd's sandboxes. +type Sandbox interface { + // ID is a sandbox identifier + ID() string + // PID returns sandbox's process PID or error if its not yet started. + PID() (uint32, error) + // NewContainer creates new container that will belong to this sandbox + NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) + // Labels returns the labels set on the sandbox + Labels(ctx context.Context) (map[string]string, error) + // Start starts new sandbox instance + Start(ctx context.Context) error + // Stop sends stop request to the shim instance. + Stop(ctx context.Context) error + // Wait blocks until sandbox process exits. 
+ Wait(ctx context.Context) (<-chan ExitStatus, error) + // Shutdown removes sandbox from the metadata store and shutdowns shim instance. + Shutdown(ctx context.Context) error +} + +type sandboxClient struct { + pid *uint32 + client *Client + metadata api.Sandbox +} + +func (s *sandboxClient) ID() string { + return s.metadata.ID +} + +func (s *sandboxClient) PID() (uint32, error) { + if s.pid == nil { + return 0, fmt.Errorf("sandbox not started") + } + + return *s.pid, nil +} + +func (s *sandboxClient) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) { + return s.client.NewContainer(ctx, id, append(opts, WithSandbox(s.ID()))...) +} + +func (s *sandboxClient) Labels(ctx context.Context) (map[string]string, error) { + sandbox, err := s.client.SandboxStore().Get(ctx, s.ID()) + if err != nil { + return nil, err + } + + return sandbox.Labels, nil +} + +func (s *sandboxClient) Start(ctx context.Context) error { + resp, err := s.client.SandboxController().Start(ctx, s.ID()) + if err != nil { + return err + } + + s.pid = &resp.Pid + return nil +} + +func (s *sandboxClient) Wait(ctx context.Context) (<-chan ExitStatus, error) { + c := make(chan ExitStatus, 1) + go func() { + defer close(c) + + exitStatus, err := s.client.SandboxController().Wait(ctx, s.ID()) + if err != nil { + c <- ExitStatus{ + code: UnknownExitStatus, + err: err, + } + return + } + + c <- ExitStatus{ + code: exitStatus.ExitStatus, + exitedAt: exitStatus.ExitedAt, + } + }() + + return c, nil +} + +func (s *sandboxClient) Stop(ctx context.Context) error { + return s.client.SandboxController().Stop(ctx, s.ID()) +} + +func (s *sandboxClient) Shutdown(ctx context.Context) error { + if err := s.client.SandboxController().Shutdown(ctx, s.ID()); err != nil { + return fmt.Errorf("failed to shutdown sandbox: %w", err) + } + + if err := s.client.SandboxStore().Delete(ctx, s.ID()); err != nil { + return fmt.Errorf("failed to delete sandbox from store: %w", err) + } + + return nil +} + +// NewSandbox creates new sandbox client +func (c *Client) NewSandbox(ctx context.Context, sandboxID string, opts ...NewSandboxOpts) (Sandbox, error) { + if sandboxID == "" { + return nil, errors.New("sandbox ID must be specified") + } + + newSandbox := api.Sandbox{ + ID: sandboxID, + CreatedAt: time.Now().UTC(), + UpdatedAt: time.Now().UTC(), + } + + for _, opt := range opts { + if err := opt(ctx, c, &newSandbox); err != nil { + return nil, err + } + } + + metadata, err := c.SandboxStore().Create(ctx, newSandbox) + if err != nil { + return nil, err + } + + return &sandboxClient{ + pid: nil, // Not yet started + client: c, + metadata: metadata, + }, nil +} + +// LoadSandbox laods existing sandbox metadata object using the id +func (c *Client) LoadSandbox(ctx context.Context, id string) (Sandbox, error) { + sandbox, err := c.SandboxStore().Get(ctx, id) + if err != nil { + return nil, err + } + + status, err := c.SandboxController().Status(ctx, id, false) + if err != nil { + return nil, fmt.Errorf("failed to load sandbox %s, status request failed: %w", id, err) + } + + return &sandboxClient{ + pid: &status.Pid, + client: c, + metadata: sandbox, + }, nil +} + +// NewSandboxOpts is a sandbox options and extensions to be provided by client +type NewSandboxOpts func(ctx context.Context, client *Client, sandbox *api.Sandbox) error + +// WithSandboxRuntime allows a user to specify the runtime to be used to run a sandbox +func WithSandboxRuntime(name string, options interface{}) NewSandboxOpts { + return func(ctx 
context.Context, client *Client, s *api.Sandbox) error { + if options == nil { + options = &types.Empty{} + } + + opts, err := typeurl.MarshalAny(options) + if err != nil { + return fmt.Errorf("failed to marshal sandbox runtime options: %w", err) + } + + s.Runtime = api.RuntimeOpts{ + Name: name, + Options: opts, + } + + return nil + } +} + +// WithSandboxSpec will provide the sandbox runtime spec +func WithSandboxSpec(s *oci.Spec, opts ...oci.SpecOpts) NewSandboxOpts { + return func(ctx context.Context, client *Client, sandbox *api.Sandbox) error { + c := &containers.Container{ID: sandbox.ID} + + if err := oci.ApplyOpts(ctx, client, c, s, opts...); err != nil { + return err + } + + spec, err := typeurl.MarshalAny(s) + if err != nil { + return fmt.Errorf("failed to marshal spec: %w", err) + } + + sandbox.Spec = spec + return nil + } +} + +// WithSandboxExtension attaches an extension to sandbox +func WithSandboxExtension(name string, ext interface{}) NewSandboxOpts { + return func(ctx context.Context, client *Client, s *api.Sandbox) error { + if s.Extensions == nil { + s.Extensions = make(map[string]typeurl.Any) + } + + any, err := typeurl.MarshalAny(ext) + if err != nil { + return fmt.Errorf("failed to marshal sandbox extension: %w", err) + } + + s.Extensions[name] = any + return err + } +} + +// WithSandboxLabels attaches map of labels to sandbox +func WithSandboxLabels(labels map[string]string) NewSandboxOpts { + return func(ctx context.Context, client *Client, sandbox *api.Sandbox) error { + sandbox.Labels = labels + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/sandbox/bridge.go b/vendor/github.com/containerd/containerd/sandbox/bridge.go new file mode 100644 index 0000000000..bc7d999ce5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/bridge.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + + "github.com/containerd/ttrpc" + "google.golang.org/grpc" + + api "github.com/containerd/containerd/api/runtime/sandbox/v1" +) + +// NewClient returns a new sandbox client that handles both GRPC and TTRPC clients. 
+func NewClient(client interface{}) (api.TTRPCSandboxService, error) { + switch c := client.(type) { + case *ttrpc.Client: + return api.NewTTRPCSandboxClient(c), nil + case grpc.ClientConnInterface: + return &grpcBridge{api.NewSandboxClient(c)}, nil + default: + return nil, fmt.Errorf("unsupported client type %T", client) + } +} + +type grpcBridge struct { + client api.SandboxClient +} + +var _ api.TTRPCSandboxService = (*grpcBridge)(nil) + +func (g *grpcBridge) CreateSandbox(ctx context.Context, request *api.CreateSandboxRequest) (*api.CreateSandboxResponse, error) { + return g.client.CreateSandbox(ctx, request) +} + +func (g *grpcBridge) StartSandbox(ctx context.Context, request *api.StartSandboxRequest) (*api.StartSandboxResponse, error) { + return g.client.StartSandbox(ctx, request) +} + +func (g *grpcBridge) Platform(ctx context.Context, request *api.PlatformRequest) (*api.PlatformResponse, error) { + return g.client.Platform(ctx, request) +} + +func (g *grpcBridge) StopSandbox(ctx context.Context, request *api.StopSandboxRequest) (*api.StopSandboxResponse, error) { + return g.client.StopSandbox(ctx, request) +} + +func (g *grpcBridge) WaitSandbox(ctx context.Context, request *api.WaitSandboxRequest) (*api.WaitSandboxResponse, error) { + return g.client.WaitSandbox(ctx, request) +} + +func (g *grpcBridge) SandboxStatus(ctx context.Context, request *api.SandboxStatusRequest) (*api.SandboxStatusResponse, error) { + return g.client.SandboxStatus(ctx, request) +} + +func (g *grpcBridge) PingSandbox(ctx context.Context, request *api.PingRequest) (*api.PingResponse, error) { + return g.client.PingSandbox(ctx, request) +} + +func (g *grpcBridge) ShutdownSandbox(ctx context.Context, request *api.ShutdownSandboxRequest) (*api.ShutdownSandboxResponse, error) { + return g.client.ShutdownSandbox(ctx, request) +} diff --git a/vendor/github.com/containerd/containerd/sandbox/controller.go b/vendor/github.com/containerd/containerd/sandbox/controller.go new file mode 100644 index 0000000000..b74f82c108 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/controller.go @@ -0,0 +1,125 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + "time" + + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/platforms" + "github.com/containerd/typeurl/v2" +) + +type CreateOptions struct { + Rootfs []*types.Mount + // Options are used to pass arbitrary options to the shim when creating a new sandbox. + // CRI will use this to pass PodSandboxConfig. + // Don't confuse this with Runtime options, which are passed at shim instance start + // to setup global shim configuration. 
+ Options typeurl.Any + NetNSPath string +} + +type CreateOpt func(*CreateOptions) error + +// WithRootFS is used to create a sandbox with the provided rootfs mount +// TODO: Switch to mount.Mount once target added +func WithRootFS(m []*types.Mount) CreateOpt { + return func(co *CreateOptions) error { + co.Rootfs = m + return nil + } +} + +// WithOptions allows passing arbitrary options when creating a new sandbox. +func WithOptions(options any) CreateOpt { + return func(co *CreateOptions) error { + var err error + co.Options, err = typeurl.MarshalAny(options) + if err != nil { + return fmt.Errorf("failed to marshal sandbox options: %w", err) + } + + return nil + } +} + +// WithNetNSPath used to assign network namespace path of a sandbox. +func WithNetNSPath(netNSPath string) CreateOpt { + return func(co *CreateOptions) error { + co.NetNSPath = netNSPath + return nil + } +} + +type StopOptions struct { + Timeout *time.Duration +} + +type StopOpt func(*StopOptions) + +func WithTimeout(timeout time.Duration) StopOpt { + return func(so *StopOptions) { + so.Timeout = &timeout + } +} + +// Controller is an interface to manage sandboxes at runtime. +// When running in sandbox mode, shim expected to implement `SandboxService`. +// Shim lifetimes are now managed manually via sandbox API by the containerd's client. +type Controller interface { + // Create is used to initialize sandbox environment. (mounts, any) + Create(ctx context.Context, sandboxID string, opts ...CreateOpt) error + // Start will start previously created sandbox. + Start(ctx context.Context, sandboxID string) (ControllerInstance, error) + // Platform returns target sandbox OS that will be used by Controller. + // containerd will rely on this to generate proper OCI spec. + Platform(_ctx context.Context, _sandboxID string) (platforms.Platform, error) + // Stop will stop sandbox instance + Stop(ctx context.Context, sandboxID string, opts ...StopOpt) error + // Wait blocks until sandbox process exits. + Wait(ctx context.Context, sandboxID string) (ExitStatus, error) + // Status will query sandbox process status. It is heavier than Ping call and must be used whenever you need to + // gather metadata about current sandbox state (status, uptime, resource use, etc). + Status(ctx context.Context, sandboxID string, verbose bool) (ControllerStatus, error) + // Shutdown deletes and cleans all tasks and sandbox instance. + Shutdown(ctx context.Context, sandboxID string) error +} + +type ControllerInstance struct { + SandboxID string + Pid uint32 + CreatedAt time.Time + Labels map[string]string +} + +type ExitStatus struct { + ExitStatus uint32 + ExitedAt time.Time +} + +type ControllerStatus struct { + SandboxID string + Pid uint32 + State string + Info map[string]string + CreatedAt time.Time + ExitedAt time.Time + Extra typeurl.Any +} diff --git a/vendor/github.com/containerd/containerd/sandbox/helpers.go b/vendor/github.com/containerd/containerd/sandbox/helpers.go new file mode 100644 index 0000000000..bfe0b23d33 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/helpers.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/protobuf" + gogo_types "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" +) + +// ToProto will map Sandbox struct to it's protobuf definition +func ToProto(sandbox *Sandbox) *types.Sandbox { + extensions := make(map[string]*gogo_types.Any) + for k, v := range sandbox.Extensions { + extensions[k] = protobuf.FromAny(v) + } + return &types.Sandbox{ + SandboxID: sandbox.ID, + Runtime: &types.Sandbox_Runtime{ + Name: sandbox.Runtime.Name, + Options: protobuf.FromAny(sandbox.Runtime.Options), + }, + Labels: sandbox.Labels, + CreatedAt: protobuf.ToTimestamp(sandbox.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(sandbox.UpdatedAt), + Extensions: extensions, + Spec: protobuf.FromAny(sandbox.Spec), + } +} + +// FromProto map protobuf sandbox definition to Sandbox struct +func FromProto(sandboxpb *types.Sandbox) Sandbox { + runtime := RuntimeOpts{ + Name: sandboxpb.Runtime.Name, + Options: sandboxpb.Runtime.Options, + } + + extensions := make(map[string]typeurl.Any) + for k, v := range sandboxpb.Extensions { + v := v + extensions[k] = v + } + + return Sandbox{ + ID: sandboxpb.SandboxID, + Labels: sandboxpb.Labels, + Runtime: runtime, + Spec: sandboxpb.Spec, + CreatedAt: protobuf.FromTimestamp(sandboxpb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(sandboxpb.UpdatedAt), + Extensions: extensions, + } +} diff --git a/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go b/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go new file mode 100644 index 0000000000..6ff9c94130 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go @@ -0,0 +1,142 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package proxy + +import ( + "context" + + api "github.com/containerd/containerd/api/services/sandbox/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/sandbox" + "google.golang.org/protobuf/types/known/anypb" +) + +// remoteSandboxController is a low level GRPC client for containerd's sandbox controller service +type remoteSandboxController struct { + client api.ControllerClient +} + +var _ sandbox.Controller = (*remoteSandboxController)(nil) + +// NewSandboxController creates a client for a sandbox controller +func NewSandboxController(client api.ControllerClient) sandbox.Controller { + return &remoteSandboxController{client: client} +} + +func (s *remoteSandboxController) Create(ctx context.Context, sandboxID string, opts ...sandbox.CreateOpt) error { + var options sandbox.CreateOptions + for _, opt := range opts { + opt(&options) + } + _, err := s.client.Create(ctx, &api.ControllerCreateRequest{ + SandboxID: sandboxID, + Rootfs: options.Rootfs, + Options: &anypb.Any{ + TypeUrl: options.Options.GetTypeUrl(), + Value: options.Options.GetValue(), + }, + NetnsPath: options.NetNSPath, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Start(ctx context.Context, sandboxID string) (sandbox.ControllerInstance, error) { + resp, err := s.client.Start(ctx, &api.ControllerStartRequest{SandboxID: sandboxID}) + if err != nil { + return sandbox.ControllerInstance{}, errdefs.FromGRPC(err) + } + + return sandbox.ControllerInstance{ + SandboxID: sandboxID, + Pid: resp.GetPid(), + CreatedAt: resp.GetCreatedAt().AsTime(), + Labels: resp.GetLabels(), + }, nil +} + +func (s *remoteSandboxController) Platform(ctx context.Context, sandboxID string) (platforms.Platform, error) { + resp, err := s.client.Platform(ctx, &api.ControllerPlatformRequest{SandboxID: sandboxID}) + if err != nil { + return platforms.Platform{}, errdefs.FromGRPC(err) + } + + platform := resp.GetPlatform() + return platforms.Platform{ + Architecture: platform.GetArchitecture(), + OS: platform.GetOS(), + Variant: platform.GetVariant(), + }, nil +} + +func (s *remoteSandboxController) Stop(ctx context.Context, sandboxID string, opts ...sandbox.StopOpt) error { + var soptions sandbox.StopOptions + for _, opt := range opts { + opt(&soptions) + } + req := &api.ControllerStopRequest{SandboxID: sandboxID} + if soptions.Timeout != nil { + req.TimeoutSecs = uint32(soptions.Timeout.Seconds()) + } + _, err := s.client.Stop(ctx, req) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Shutdown(ctx context.Context, sandboxID string) error { + _, err := s.client.Shutdown(ctx, &api.ControllerShutdownRequest{SandboxID: sandboxID}) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Wait(ctx context.Context, sandboxID string) (sandbox.ExitStatus, error) { + resp, err := s.client.Wait(ctx, &api.ControllerWaitRequest{SandboxID: sandboxID}) + if err != nil { + return sandbox.ExitStatus{}, errdefs.FromGRPC(err) + } + + return sandbox.ExitStatus{ + ExitStatus: resp.GetExitStatus(), + ExitedAt: resp.GetExitedAt().AsTime(), + }, nil +} + +func (s *remoteSandboxController) Status(ctx context.Context, sandboxID string, verbose bool) (sandbox.ControllerStatus, error) { + resp, err := s.client.Status(ctx, &api.ControllerStatusRequest{SandboxID: sandboxID, Verbose: verbose}) + if err != nil { + return 
sandbox.ControllerStatus{}, errdefs.FromGRPC(err) + } + return sandbox.ControllerStatus{ + SandboxID: sandboxID, + Pid: resp.GetPid(), + State: resp.GetState(), + Info: resp.GetInfo(), + CreatedAt: resp.GetCreatedAt().AsTime(), + ExitedAt: resp.GetExitedAt().AsTime(), + Extra: resp.GetExtra(), + }, nil +} diff --git a/vendor/github.com/containerd/containerd/sandbox/proxy/store.go b/vendor/github.com/containerd/containerd/sandbox/proxy/store.go new file mode 100644 index 0000000000..64e4a2b320 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/proxy/store.go @@ -0,0 +1,95 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + + api "github.com/containerd/containerd/api/services/sandbox/v1" + "github.com/containerd/containerd/errdefs" + sb "github.com/containerd/containerd/sandbox" +) + +// remoteSandboxStore is a low-level containerd client to manage sandbox environments metadata +type remoteSandboxStore struct { + client api.StoreClient +} + +var _ sb.Store = (*remoteSandboxStore)(nil) + +// NewSandboxStore create a client for a sandbox store +func NewSandboxStore(client api.StoreClient) sb.Store { + return &remoteSandboxStore{client: client} +} + +func (s *remoteSandboxStore) Create(ctx context.Context, sandbox sb.Sandbox) (sb.Sandbox, error) { + resp, err := s.client.Create(ctx, &api.StoreCreateRequest{ + Sandbox: sb.ToProto(&sandbox), + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) Update(ctx context.Context, sandbox sb.Sandbox, fieldpaths ...string) (sb.Sandbox, error) { + resp, err := s.client.Update(ctx, &api.StoreUpdateRequest{ + Sandbox: sb.ToProto(&sandbox), + Fields: fieldpaths, + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) Get(ctx context.Context, id string) (sb.Sandbox, error) { + resp, err := s.client.Get(ctx, &api.StoreGetRequest{ + SandboxID: id, + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) List(ctx context.Context, filters ...string) ([]sb.Sandbox, error) { + resp, err := s.client.List(ctx, &api.StoreListRequest{ + Filters: filters, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + out := make([]sb.Sandbox, len(resp.List)) + for i := range resp.List { + out[i] = sb.FromProto(resp.List[i]) + } + + return out, nil +} + +func (s *remoteSandboxStore) Delete(ctx context.Context, id string) error { + _, err := s.client.Delete(ctx, &api.StoreDeleteRequest{ + SandboxID: id, + }) + + return errdefs.FromGRPC(err) +} diff --git a/vendor/github.com/containerd/containerd/sandbox/store.go b/vendor/github.com/containerd/containerd/sandbox/store.go new file mode 100644 index 0000000000..cda646dde2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/store.go @@ -0,0 
+1,116 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/typeurl/v2" +) + +// Sandbox is an object stored in metadata database +type Sandbox struct { + // ID uniquely identifies the sandbox in a namespace + ID string + // Labels provide metadata extension for a sandbox + Labels map[string]string + // Runtime shim to use for this sandbox + Runtime RuntimeOpts + // Spec carries the runtime specification used to implement the sandbox + Spec typeurl.Any + // CreatedAt is the time at which the sandbox was created + CreatedAt time.Time + // UpdatedAt is the time at which the sandbox was updated + UpdatedAt time.Time + // Extensions stores client-specified metadata + Extensions map[string]typeurl.Any +} + +// RuntimeOpts holds runtime specific information +type RuntimeOpts struct { + Name string + Options typeurl.Any +} + +// Store is a storage interface for sandbox metadata objects +type Store interface { + // Create a sandbox record in the store + Create(ctx context.Context, sandbox Sandbox) (Sandbox, error) + + // Update the sandbox with the provided sandbox object and fields + Update(ctx context.Context, sandbox Sandbox, fieldpaths ...string) (Sandbox, error) + + // Get sandbox metadata using the id + Get(ctx context.Context, id string) (Sandbox, error) + + // List returns sandboxes that match one or more of the provided filters + List(ctx context.Context, filters ...string) ([]Sandbox, error) + + // Delete a sandbox from metadata store using the id + Delete(ctx context.Context, id string) error +} + +// AddExtension is a helper function to add sandbox metadata extension. +func (s *Sandbox) AddExtension(name string, obj interface{}) error { + if s.Extensions == nil { + s.Extensions = map[string]typeurl.Any{} + } + + out, err := typeurl.MarshalAny(obj) + if err != nil { + return fmt.Errorf("failed to marshal sandbox extension %q: %w", name, err) + } + + s.Extensions[name] = out + return nil +} + +// AddLabel adds a label to sandbox's labels. +func (s *Sandbox) AddLabel(name string, value string) { + if s.Labels == nil { + s.Labels = map[string]string{} + } + + s.Labels[name] = value +} + +// GetExtension retrieves a sandbox extension by name. +func (s *Sandbox) GetExtension(name string, obj interface{}) error { + out, ok := s.Extensions[name] + if !ok { + return errdefs.ErrNotFound + } + + if err := typeurl.UnmarshalTo(out, obj); err != nil { + return fmt.Errorf("failed to unmarshal sandbox extension %q: %w", name, err) + } + + return nil +} + +// GetLabel retrieves a sandbox label by name. 
+func (s *Sandbox) GetLabel(name string) (string, error) { + out, ok := s.Labels[name] + if !ok { + return "", fmt.Errorf("unable to find label %q in sandbox metadata: %w", name, errdefs.ErrNotFound) + } + + return out, nil +} diff --git a/vendor/github.com/containerd/containerd/services.go b/vendor/github.com/containerd/containerd/services.go index e780e6ccfe..edb8872e55 100644 --- a/vendor/github.com/containerd/containerd/services.go +++ b/vendor/github.com/containerd/containerd/services.go @@ -17,6 +17,8 @@ package containerd import ( + "fmt" + containersapi "github.com/containerd/containerd/api/services/containers/v1" "github.com/containerd/containerd/api/services/diff/v1" imagesapi "github.com/containerd/containerd/api/services/images/v1" @@ -28,6 +30,9 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/sandbox" + srv "github.com/containerd/containerd/services" "github.com/containerd/containerd/services/introspection" "github.com/containerd/containerd/snapshots" ) @@ -43,6 +48,8 @@ type services struct { eventService EventService leasesService leases.Manager introspectionService introspection.Service + sandboxStore sandbox.Store + sandboxController sandbox.Controller } // ServicesOpt allows callers to set options on the services @@ -155,3 +162,95 @@ func WithIntrospectionService(in introspection.Service) ServicesOpt { s.introspectionService = in } } + +// WithSandboxStore sets the sandbox store. +func WithSandboxStore(client sandbox.Store) ServicesOpt { + return func(s *services) { + s.sandboxStore = client + } +} + +// WithSandboxController sets the sandbox controller. +func WithSandboxController(client sandbox.Controller) ServicesOpt { + return func(s *services) { + s.sandboxController = client + } +} + +// WithInMemoryServices is suitable for cases when there is need to use containerd's client from +// another (in-memory) containerd plugin (such as CRI). 
+func WithInMemoryServices(ic *plugin.InitContext) ClientOpt { + return func(c *clientOpts) error { + var opts []ServicesOpt + for t, fn := range map[plugin.Type]func(interface{}) ServicesOpt{ + plugin.EventPlugin: func(i interface{}) ServicesOpt { + return WithEventService(i.(EventService)) + }, + plugin.LeasePlugin: func(i interface{}) ServicesOpt { + return WithLeasesService(i.(leases.Manager)) + }, + plugin.SandboxStorePlugin: func(i interface{}) ServicesOpt { + return WithSandboxStore(i.(sandbox.Store)) + }, + plugin.SandboxControllerPlugin: func(i interface{}) ServicesOpt { + return WithSandboxController(i.(sandbox.Controller)) + }, + } { + i, err := ic.Get(t) + if err != nil { + return fmt.Errorf("failed to get %q plugin: %w", t, err) + } + opts = append(opts, fn(i)) + } + + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return fmt.Errorf("failed to get service plugin: %w", err) + } + for s, fn := range map[string]func(interface{}) ServicesOpt{ + srv.ContentService: func(s interface{}) ServicesOpt { + return WithContentStore(s.(content.Store)) + }, + srv.ImagesService: func(s interface{}) ServicesOpt { + return WithImageClient(s.(imagesapi.ImagesClient)) + }, + srv.SnapshotsService: func(s interface{}) ServicesOpt { + return WithSnapshotters(s.(map[string]snapshots.Snapshotter)) + }, + srv.ContainersService: func(s interface{}) ServicesOpt { + return WithContainerClient(s.(containersapi.ContainersClient)) + }, + srv.TasksService: func(s interface{}) ServicesOpt { + return WithTaskClient(s.(tasks.TasksClient)) + }, + srv.DiffService: func(s interface{}) ServicesOpt { + return WithDiffClient(s.(diff.DiffClient)) + }, + srv.NamespacesService: func(s interface{}) ServicesOpt { + return WithNamespaceClient(s.(namespacesapi.NamespacesClient)) + }, + srv.IntrospectionService: func(s interface{}) ServicesOpt { + return WithIntrospectionClient(s.(introspectionapi.IntrospectionClient)) + }, + } { + p := plugins[s] + if p == nil { + return fmt.Errorf("service %q not found", s) + } + i, err := p.Instance() + if err != nil { + return fmt.Errorf("failed to get instance of service %q: %w", s, err) + } + if i == nil { + return fmt.Errorf("instance of service %q not found", s) + } + opts = append(opts, fn(i)) + } + + c.services = &services{} + for _, o := range opts { + o(c.services) + } + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go index eb5855a476..76a9e6eea2 100644 --- a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go +++ b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go @@ -26,10 +26,10 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,6 +37,7 @@ import ( type service struct { store content.Store + api.UnimplementedContentServer } var bufPool = sync.Pool{ @@ -57,11 +58,12 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Info(ctx context.Context, 
req *api.InfoRequest) (*api.InfoResponse, error) { - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) } - bi, err := s.store.Info(ctx, req.Digest) + bi, err := s.store.Info(ctx, dg) if err != nil { return nil, errdefs.ToGRPC(err) } @@ -72,7 +74,8 @@ func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResp } func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { - if err := req.Info.Digest.Validate(); err != nil { + _, err := digest.Parse(req.Info.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) } @@ -88,8 +91,8 @@ func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.Upda func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { var ( - buffer []api.Info - sendBlock = func(block []api.Info) error { + buffer []*api.Info + sendBlock = func(block []*api.Info) error { // send last block return session.Send(&api.ListContentResponse{ Info: block, @@ -98,10 +101,10 @@ func (s *service) List(req *api.ListContentRequest, session api.Content_ListServ ) if err := s.store.Walk(session.Context(), func(info content.Info) error { - buffer = append(buffer, api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, + buffer = append(buffer, &api.Info{ + Digest: info.Digest.String(), + Size: info.Size, + CreatedAt: protobuf.ToTimestamp(info.CreatedAt), Labels: info.Labels, }) @@ -130,11 +133,12 @@ func (s *service) List(req *api.ListContentRequest, session api.Content_ListServ func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) { log.G(ctx).WithField("digest", req.Digest).Debugf("delete content") - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, err.Error()) } - if err := s.store.Delete(ctx, req.Digest); err != nil { + if err := s.store.Delete(ctx, dg); err != nil { return nil, errdefs.ToGRPC(err) } @@ -142,16 +146,17 @@ func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*p } func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) } - oi, err := s.store.Info(session.Context(), req.Digest) + oi, err := s.store.Info(session.Context(), dg) if err != nil { return errdefs.ToGRPC(err) } - ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest}) + ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: dg}) if err != nil { return errdefs.ToGRPC(err) } @@ -161,7 +166,7 @@ func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServ offset = req.Offset // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. // offset+size can be larger than oi.Size. - size = req.Size_ + size = req.Size // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably // little inefficient for work over a fast network. We can tune this later. 
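The content-service hunks above all follow one pattern: the regenerated protobuf types carry digests as plain strings, so each handler now runs the incoming value through digest.Parse before touching the store and maps a parse failure to InvalidArgument. A minimal, hedged sketch of that validation step, assuming only the opencontainers/go-digest module; the sample digest value is arbitrary and only format-valid, not tied to any real blob:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	for _, raw := range []string{
		// Format-valid sha256 digest; the value itself is arbitrary.
		"sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
		"not-a-digest",
	} {
		dg, err := digest.Parse(raw)
		if err != nil {
			// In the service above this branch becomes
			// status.Errorf(codes.InvalidArgument, ...).
			fmt.Printf("%q failed validation: %v\n", raw, err)
			continue
		}
		fmt.Println("validated:", dg.Algorithm(), dg.Encoded())
	}
}
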
@@ -216,12 +221,12 @@ func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.Stat var resp api.StatusResponse resp.Status = &api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + StartedAt: protobuf.ToTimestamp(status.StartedAt), + UpdatedAt: protobuf.ToTimestamp(status.UpdatedAt), Ref: status.Ref, Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: status.Expected.String(), } return &resp, nil @@ -235,13 +240,13 @@ func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest var resp api.ListStatusesResponse for _, status := range statuses { - resp.Statuses = append(resp.Statuses, api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + resp.Statuses = append(resp.Statuses, &api.Status{ + StartedAt: protobuf.ToTimestamp(status.StartedAt), + UpdatedAt: protobuf.ToTimestamp(status.UpdatedAt), Ref: status.Ref, Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: status.Expected.String(), }) } @@ -289,11 +294,11 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { return status.Errorf(codes.InvalidArgument, "first message must have a reference") } - fields := logrus.Fields{ + fields := log.Fields{ "ref": ref, } total = req.Total - expected = req.Expected + expected = digest.Digest(req.Expected) if total > 0 { fields["total"] = total } @@ -341,12 +346,13 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { // Supporting these two paths is quite awkward but it lets both API // users use the same writer style for each with a minimum of overhead. if req.Expected != "" { - if expected != "" && expected != req.Expected { - log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected) + dg := digest.Digest(req.Expected) + if expected != "" && expected != dg { + log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", dg, expected) } - expected = req.Expected + expected = dg - if _, err := s.store.Info(session.Context(), req.Expected); err == nil { + if _, err := s.store.Info(session.Context(), dg); err == nil { if err := wr.Close(); err != nil { log.G(ctx).WithError(err).Error("failed to close writer") } @@ -368,12 +374,12 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { } switch req.Action { - case api.WriteActionStat: - msg.Digest = wr.Digest() - msg.StartedAt = ws.StartedAt - msg.UpdatedAt = ws.UpdatedAt + case api.WriteAction_STAT: + msg.Digest = wr.Digest().String() + msg.StartedAt = protobuf.ToTimestamp(ws.StartedAt) + msg.UpdatedAt = protobuf.ToTimestamp(ws.UpdatedAt) msg.Total = total - case api.WriteActionWrite, api.WriteActionCommit: + case api.WriteAction_WRITE, api.WriteAction_COMMIT: if req.Offset > 0 { // validate the offset if provided if req.Offset != ws.Offset { @@ -406,7 +412,7 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { msg.Offset += int64(n) } - if req.Action == api.WriteActionCommit { + if req.Action == api.WriteAction_COMMIT { var opts []content.Opt if req.Labels != nil { opts = append(opts, content.WithLabels(req.Labels)) @@ -416,14 +422,14 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { } } - msg.Digest = wr.Digest() + msg.Digest = wr.Digest().String() } if err := session.Send(&msg); err != nil { return err } - if req.Action == api.WriteActionCommit { + if req.Action == api.WriteAction_COMMIT { return nil } @@ -446,22 +452,22 @@ func (s *service) Abort(ctx 
context.Context, req *api.AbortRequest) (*ptypes.Emp return &ptypes.Empty{}, nil } -func infoToGRPC(info content.Info) api.Info { - return api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, +func infoToGRPC(info content.Info) *api.Info { + return &api.Info{ + Digest: info.Digest.String(), + Size: info.Size, + CreatedAt: protobuf.ToTimestamp(info.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(info.UpdatedAt), Labels: info.Labels, } } -func infoFromGRPC(info api.Info) content.Info { +func infoFromGRPC(info *api.Info) content.Info { return content.Info{ - Digest: info.Digest, - Size: info.Size_, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, + Digest: digest.Digest(info.Digest), + Size: info.Size, + CreatedAt: protobuf.FromTimestamp(info.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(info.UpdatedAt), Labels: info.Labels, } } diff --git a/vendor/github.com/containerd/containerd/services/introspection/introspection.go b/vendor/github.com/containerd/containerd/services/introspection/introspection.go index 71758fad88..7f88af4f9f 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/introspection.go +++ b/vendor/github.com/containerd/containerd/services/introspection/introspection.go @@ -22,10 +22,10 @@ import ( api "github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - ptypes "github.com/gogo/protobuf/types" + ptypes "github.com/containerd/containerd/protobuf/types" ) -// Service defines the instrospection service interface +// Service defines the introspection service interface type Service interface { Plugins(context.Context, []string) (*api.PluginsResponse, error) Server(context.Context, *ptypes.Empty) (*api.ServerResponse, error) diff --git a/vendor/github.com/containerd/containerd/services/introspection/local.go b/vendor/github.com/containerd/containerd/services/introspection/local.go index 47388e4371..7aa291b47b 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/local.go +++ b/vendor/github.com/containerd/containerd/services/introspection/local.go @@ -20,6 +20,7 @@ import ( context "context" "os" "path/filepath" + "runtime" "sync" api "github.com/containerd/containerd/api/services/introspection/v1" @@ -27,10 +28,11 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/services" - "github.com/gogo/googleapis/google/rpc" - ptypes "github.com/gogo/protobuf/types" "github.com/google/uuid" + "google.golang.org/genproto/googleapis/rpc/code" + rpc "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/status" ) @@ -55,7 +57,7 @@ type Local struct { mu sync.Mutex root string plugins *plugin.Set - pluginCache []api.Plugin + pluginCache []*api.Plugin } var _ = (api.IntrospectionClient)(&Local{}) @@ -74,14 +76,13 @@ func (l *Local) Plugins(ctx context.Context, req *api.PluginsRequest, _ ...grpc. 
return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, err.Error()) } - var plugins []api.Plugin + var plugins []*api.Plugin allPlugins := l.getPlugins() for _, p := range allPlugins { - if !filter.Match(adaptPlugin(p)) { - continue + p := p + if filter.Match(adaptPlugin(p)) { + plugins = append(plugins, p) } - - plugins = append(plugins, p) } return &api.PluginsResponse{ @@ -89,7 +90,7 @@ func (l *Local) Plugins(ctx context.Context, req *api.PluginsRequest, _ ...grpc. }, nil } -func (l *Local) getPlugins() []api.Plugin { +func (l *Local) getPlugins() []*api.Plugin { l.mu.Lock() defer l.mu.Unlock() plugins := l.plugins.GetAll() @@ -105,8 +106,18 @@ func (l *Local) Server(ctx context.Context, _ *ptypes.Empty, _ ...grpc.CallOptio if err != nil { return nil, errdefs.ToGRPC(err) } + pid := os.Getpid() + var pidns uint64 + if runtime.GOOS == "linux" { + pidns, err = statPIDNS(pid) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + } return &api.ServerResponse{ - UUID: u, + UUID: u, + Pid: uint64(pid), + Pidns: pidns, }, nil } @@ -149,7 +160,7 @@ func (l *Local) uuidPath() string { } func adaptPlugin(o interface{}) filters.Adaptor { - obj := o.(api.Plugin) + obj := o.(*api.Plugin) return filters.AdapterFunc(func(fieldpath []string) (string, bool) { if len(fieldpath) == 0 { return "", false @@ -175,12 +186,12 @@ func adaptPlugin(o interface{}) filters.Adaptor { }) } -func pluginsToPB(plugins []*plugin.Plugin) []api.Plugin { - var pluginsPB []api.Plugin +func pluginsToPB(plugins []*plugin.Plugin) []*api.Plugin { + var pluginsPB []*api.Plugin for _, p := range plugins { - var platforms []types.Platform + var platforms []*types.Platform for _, p := range p.Meta.Platforms { - platforms = append(platforms, types.Platform{ + platforms = append(platforms, &types.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, @@ -210,13 +221,13 @@ func pluginsToPB(plugins []*plugin.Plugin) []api.Plugin { } } else { initErr = &rpc.Status{ - Code: int32(rpc.UNKNOWN), + Code: int32(code.Code_UNKNOWN), Message: err.Error(), } } } - pluginsPB = append(pluginsPB, api.Plugin{ + pluginsPB = append(pluginsPB, &api.Plugin{ Type: p.Registration.Type.String(), ID: p.Registration.ID, Requires: requires, diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go similarity index 65% rename from vendor/github.com/containerd/containerd/sys/fds.go rename to vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go index a71a9cd7e9..4dce1a5e7b 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go @@ -1,6 +1,3 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - /* Copyright The containerd Authors. @@ -17,19 +14,23 @@ limitations under the License. 
*/ -package sys +package introspection import ( + "fmt" "os" - "path/filepath" - "strconv" + "syscall" ) -// GetOpenFds returns the number of open fds for the process provided by pid -func GetOpenFds(pid int) (int, error) { - dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) +func statPIDNS(pid int) (uint64, error) { + f := fmt.Sprintf("/proc/%d/ns/pid", pid) + st, err := os.Stat(f) if err != nil { - return -1, err + return 0, err } - return len(dirs), nil + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("%T is not *syscall.Stat_t", st.Sys()) + } + return stSys.Ino, nil } diff --git a/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go b/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go new file mode 100644 index 0000000000..c2cc475af3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go @@ -0,0 +1,23 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package introspection + +func statPIDNS(pid int) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/containerd/containerd/services/introspection/service.go b/vendor/github.com/containerd/containerd/services/introspection/service.go index c11b8dc1ce..60013b52c8 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/service.go +++ b/vendor/github.com/containerd/containerd/services/introspection/service.go @@ -22,8 +22,8 @@ import ( api "github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/services" - ptypes "github.com/gogo/protobuf/types" "google.golang.org/grpc" ) @@ -49,7 +49,7 @@ func init() { localClient, ok := i.(*Local) if !ok { - return nil, errors.New("Could not create a local client for introspection service") + return nil, errors.New("could not create a local client for introspection service") } localClient.UpdateLocal(ic.Root) @@ -62,6 +62,7 @@ func init() { type server struct { local api.IntrospectionClient + api.UnimplementedIntrospectionServer } var _ = (api.IntrospectionServer)(&server{}) diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go index 4c475d43d7..340c93b31f 100644 --- a/vendor/github.com/containerd/containerd/services/server/config/config.go +++ b/vendor/github.com/containerd/containerd/services/server/config/config.go @@ -104,17 +104,17 @@ func (c *Config) ValidateV2() error { return nil } for _, p := range c.DisabledPlugins { - if len(strings.Split(p, ".")) < 4 { + if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) } } for _, p := range c.RequiredPlugins { - if len(strings.Split(p, ".")) < 4 { + if 
!strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) } } for p := range c.Plugins { - if len(strings.Split(p, ".")) < 4 { + if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) } } @@ -147,7 +147,7 @@ type Debug struct { UID int `toml:"uid"` GID int `toml:"gid"` Level string `toml:"level"` - // Format represents the logging format + // Format represents the logging format. Supported values are 'text' and 'json'. Format string `toml:"format"` } @@ -168,44 +168,6 @@ type ProxyPlugin struct { Address string `toml:"address"` } -// BoltConfig defines the configuration values for the bolt plugin, which is -// loaded here, rather than back registered in the metadata package. -type BoltConfig struct { - // ContentSharingPolicy sets the sharing policy for content between - // namespaces. - // - // The default mode "shared" will make blobs available in all - // namespaces once it is pulled into any namespace. The blob will be pulled - // into the namespace if a writer is opened with the "Expected" digest that - // is already present in the backend. - // - // The alternative mode, "isolated" requires that clients prove they have - // access to the content by providing all of the content to the ingest - // before the blob is added to the namespace. - // - // Both modes share backing data, while "shared" will reduce total - // bandwidth across namespaces, at the cost of allowing access to any blob - // just by knowing its digest. - ContentSharingPolicy string `toml:"content_sharing_policy"` -} - -const ( - // SharingPolicyShared represents the "shared" sharing policy - SharingPolicyShared = "shared" - // SharingPolicyIsolated represents the "isolated" sharing policy - SharingPolicyIsolated = "isolated" -) - -// Validate validates if BoltConfig is valid -func (bc *BoltConfig) Validate() error { - switch bc.ContentSharingPolicy { - case SharingPolicyShared, SharingPolicyIsolated: - return nil - default: - return fmt.Errorf("unknown policy: %s: %w", bc.ContentSharingPolicy, errdefs.ErrInvalidArgument) - } -} - // Decode unmarshals a plugin specific configuration by plugin id func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { id := p.URI() diff --git a/vendor/github.com/containerd/containerd/services/services.go b/vendor/github.com/containerd/containerd/services/services.go index 27f47a5cef..d7255169f1 100644 --- a/vendor/github.com/containerd/containerd/services/services.go +++ b/vendor/github.com/containerd/containerd/services/services.go @@ -29,10 +29,10 @@ const ( TasksService = "tasks-service" // NamespacesService is id of namespaces service. NamespacesService = "namespaces-service" - // LeasesService is id of leases service. - LeasesService = "leases-service" // DiffService is id of diff service. 
DiffService = "diff-service" // IntrospectionService is the id of introspection service IntrospectionService = "introspection-service" + // Streaming service is the id of the streaming service + StreamingService = "streaming-service" ) diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go index 0eb6b5a155..726c085a93 100644 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go b/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go index 00c320c608..3ef3b2698e 100644 --- a/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go +++ b/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go @@ -24,8 +24,9 @@ import ( "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/protobuf" + protobuftypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/snapshots" - protobuftypes "github.com/gogo/protobuf/types" ) // NewSnapshotter returns a new Snapshotter which communicates over a GRPC @@ -192,22 +193,22 @@ func (p *proxySnapshotter) Cleanup(ctx context.Context) error { } func toKind(kind snapshotsapi.Kind) snapshots.Kind { - if kind == snapshotsapi.KindActive { + if kind == snapshotsapi.Kind_ACTIVE { return snapshots.KindActive } - if kind == snapshotsapi.KindView { + if kind == snapshotsapi.Kind_VIEW { return snapshots.KindView } return snapshots.KindCommitted } -func toInfo(info snapshotsapi.Info) snapshots.Info { +func toInfo(info *snapshotsapi.Info) snapshots.Info { return snapshots.Info{ Name: info.Name, Parent: info.Parent, Kind: toKind(info.Kind), - Created: info.CreatedAt, - Updated: info.UpdatedAt, + Created: protobuf.FromTimestamp(info.CreatedAt), + Updated: protobuf.FromTimestamp(info.UpdatedAt), Labels: info.Labels, } } @@ -215,7 +216,7 @@ func toInfo(info snapshotsapi.Info) snapshots.Info { func toUsage(resp *snapshotsapi.UsageResponse) snapshots.Usage { return snapshots.Usage{ Inodes: resp.Inodes, - Size: resp.Size_, + Size: resp.Size, } } @@ -225,6 +226,7 @@ func toMounts(mm []*types.Mount) []mount.Mount { mounts[i] = mount.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, } } @@ -233,21 +235,21 @@ func toMounts(mm []*types.Mount) []mount.Mount { func fromKind(kind snapshots.Kind) snapshotsapi.Kind { if kind == snapshots.KindActive { - return snapshotsapi.KindActive + return snapshotsapi.Kind_ACTIVE } if kind == snapshots.KindView { - return snapshotsapi.KindView + return snapshotsapi.Kind_VIEW } - return snapshotsapi.KindCommitted + return snapshotsapi.Kind_COMMITTED } -func fromInfo(info snapshots.Info) snapshotsapi.Info { - return snapshotsapi.Info{ +func fromInfo(info snapshots.Info) *snapshotsapi.Info { + return &snapshotsapi.Info{ Name: info.Name, Parent: info.Parent, Kind: fromKind(info.Kind), - CreatedAt: info.Created, - UpdatedAt: info.Updated, + CreatedAt: protobuf.ToTimestamp(info.Created), + UpdatedAt: protobuf.ToTimestamp(info.Updated), Labels: info.Labels, } } diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go 
b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index e144fb1583..5fa5aa530c 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -33,6 +33,11 @@ const ( UnpackKeyFormat = UnpackKeyPrefix + "-%s %s" inheritedLabelsPrefix = "containerd.io/snapshot/" labelSnapshotRef = "containerd.io/snapshot.ref" + + // LabelSnapshotUIDMapping is the label used for UID mappings + LabelSnapshotUIDMapping = "containerd.io/snapshot/uidmapping" + // LabelSnapshotGIDMapping is the label used for GID mappings + LabelSnapshotGIDMapping = "containerd.io/snapshot/gidmapping" ) // Kind identifies the kind of snapshot. @@ -94,7 +99,7 @@ func (k *Kind) UnmarshalJSON(b []byte) error { } // Info provides information about a particular snapshot. -// JSON marshallability is supported for interactive with tools like ctr, +// JSON marshalling is supported for interacting with tools like ctr, type Info struct { Kind Kind // active or committed snapshot Name string // name or key of snapshot @@ -102,8 +107,8 @@ type Info struct { // Labels for a snapshot. // - // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the - // snapshotter's `Prepare`, `View`, or `Commit` calls. + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited + // by the snapshotter's `Prepare`, `View`, or `Commit` calls. Labels map[string]string `json:",omitempty"` Created time.Time `json:",omitempty"` // Created time Updated time.Time `json:",omitempty"` // Last update time @@ -122,7 +127,7 @@ type Usage struct { func (u *Usage) Add(other Usage) { u.Size += other.Size - // TODO(stevvooe): assumes independent inodes, but provides and upper + // TODO(stevvooe): assumes independent inodes, but provides an upper // bound. This should be pretty close, assuming the inodes for a // snapshot are roughly unique to it. Don't trust this assumption. u.Inodes += other.Inodes @@ -165,19 +170,18 @@ type WalkFunc func(context.Context, Info) error // same key space. For example, an active snapshot may not share the same key // with a committed snapshot. // -// We cover several examples below to demonstrate the utility of a snapshot -// snapshotter. +// We cover several examples below to demonstrate the utility of the snapshotter. // // # Importing a Layer // -// To import a layer, we simply have the Snapshotter provide a list of +// To import a layer, we simply have the snapshotter provide a list of // mounts to be applied such that our dst will capture a changeset. We start // out by getting a path to the layer tar file and creating a temp location to // unpack it to: // // layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file. // -// We start by using a Snapshotter to Prepare a new snapshot transaction, using a +// We start by using the snapshotter to Prepare a new snapshot transaction, using a // key and descending from the empty parent "". To prevent our layer from being // garbage collected during unpacking, we add the `containerd.io/gc.root` label: // @@ -187,7 +191,7 @@ type WalkFunc func(context.Context, Info) error // mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) // if err != nil { ... } // -// We get back a list of mounts from Snapshotter.Prepare, with the key identifying +// We get back a list of mounts from snapshotter.Prepare(), with the key identifying // the active snapshot. 
Mount this to the temporary location with the // following: // @@ -205,7 +209,7 @@ type WalkFunc func(context.Context, Info) error // digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location // if err != nil { ... } // -// When the above completes, we should have a filesystem the represents the +// When the above completes, we should have a filesystem that represents the // contents of the layer. Careful implementations should verify that digest // matches the expected DiffID. When completed, we unmount the mounts: // @@ -218,14 +222,14 @@ type WalkFunc func(context.Context, Info) error // // if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... } // -// Now, we have a layer in the Snapshotter that can be accessed with the digest +// Now, we have a layer in the snapshotter that can be accessed with the digest // provided during commit. // // # Importing the Next Layer // // Making a layer depend on the above is identical to the process described // above except that the parent is provided as parent when calling -// Manager.Prepare, assuming a clean, unique key identifier: +// snapshotter.Prepare(), assuming a clean, unique key identifier: // // mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) // @@ -234,20 +238,20 @@ type WalkFunc func(context.Context, Info) error // // # Running a Container // -// To run a container, we simply provide Snapshotter.Prepare the committed image +// To run a container, we simply provide snapshotter.Prepare() the committed image // snapshot as the parent. After mounting, the prepared path can // be used directly as the container's filesystem: // // mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) // // The returned mounts can then be passed directly to the container runtime. If -// one would like to create a new image from the filesystem, Manager.Commit is +// one would like to create a new image from the filesystem, snapshotter.Commit() is // called: // // if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } // -// Alternatively, for most container runs, Snapshotter.Remove will be called to -// signal the Snapshotter to abandon the changes. +// Alternatively, for most container runs, snapshotter.Remove() will be called to +// signal the snapshotter to abandon the changes. type Snapshotter interface { // Stat returns the info for an active or committed snapshot by name or // key. @@ -267,12 +271,12 @@ type Snapshotter interface { // The running time of this call for active snapshots is dependent on // implementation, but may be proportional to the size of the resource. // Callers should take this into consideration. Implementations should - // attempt to honer context cancellation and avoid taking locks when making + // attempt to honor context cancellation and avoid taking locks when making // the calculation. Usage(ctx context.Context, key string) (Usage, error) // Mounts returns the mounts for the active snapshot transaction identified - // by key. Can be called on an read-write or readonly transaction. This is + // by key. Can be called on a read-write or readonly transaction. This is // available only for active snapshots. // // This can be used to recover mounts after calling View or Prepare. @@ -298,7 +302,7 @@ type Snapshotter interface { // committed back to the snapshot snapshotter. View returns a readonly view on // the parent, with the active snapshot being tracked by the given key. 
// - // This method operates identically to Prepare, except that Mounts returned + // This method operates identically to Prepare, except the mounts returned // may have the readonly flag set. Any modifications to the underlying // filesystem will be ignored. Implementations may perform this in a more // efficient manner that differs from what would be attempted with diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go index dcba4792cc..8e191ca6ac 100644 --- a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go +++ b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go @@ -1,5 +1,4 @@ //go:build darwin || freebsd || solaris -// +build darwin freebsd solaris /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go b/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go index 2a2c829f01..4739e192fd 100644 --- a/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go +++ b/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -20,17 +19,92 @@ package containerd import ( + "context" "fmt" "github.com/containerd/containerd/snapshots" ) +const ( + capabRemapIDs = "remap-ids" +) + // WithRemapperLabels creates the labels used by any supporting snapshotter // to shift the filesystem ownership (user namespace mapping) automatically; currently // supported by the fuse-overlayfs snapshotter func WithRemapperLabels(ctrUID, hostUID, ctrGID, hostGID, length uint32) snapshots.Opt { return snapshots.WithLabels(map[string]string{ - "containerd.io/snapshot/uidmapping": fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length), - "containerd.io/snapshot/gidmapping": fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length), - }) + snapshots.LabelSnapshotUIDMapping: fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length), + snapshots.LabelSnapshotGIDMapping: fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length)}) +} + +func resolveSnapshotOptions(ctx context.Context, client *Client, snapshotterName string, snapshotter snapshots.Snapshotter, parent string, opts ...snapshots.Opt) (string, error) { + capabs, err := client.GetSnapshotterCapabilities(ctx, snapshotterName) + if err != nil { + return "", err + } + + for _, capab := range capabs { + if capab == capabRemapIDs { + // Snapshotter supports ID remapping, we don't need to do anything. 
+ return parent, nil + } + } + + var local snapshots.Info + for _, opt := range opts { + opt(&local) + } + + needsRemap := false + var uidMap, gidMap string + + if value, ok := local.Labels[snapshots.LabelSnapshotUIDMapping]; ok { + needsRemap = true + uidMap = value + } + if value, ok := local.Labels[snapshots.LabelSnapshotGIDMapping]; ok { + needsRemap = true + gidMap = value + } + + if !needsRemap { + return parent, nil + } + + var ctrUID, hostUID, length uint32 + _, err = fmt.Sscanf(uidMap, "%d:%d:%d", &ctrUID, &hostUID, &length) + if err != nil { + return "", fmt.Errorf("uidMap unparsable: %w", err) + } + + var ctrGID, hostGID, lengthGID uint32 + _, err = fmt.Sscanf(gidMap, "%d:%d:%d", &ctrGID, &hostGID, &lengthGID) + if err != nil { + return "", fmt.Errorf("gidMap unparsable: %w", err) + } + + if ctrUID != 0 || ctrGID != 0 { + return "", fmt.Errorf("Container UID/GID of 0 only supported currently (%d/%d)", ctrUID, ctrGID) + } + + // TODO(dgl): length isn't taken into account for the intermediate snapshot id. + usernsID := fmt.Sprintf("%s-%d-%d", parent, hostUID, hostGID) + if _, err := snapshotter.Stat(ctx, usernsID); err == nil { + return usernsID, nil + } + mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent) + if err != nil { + return "", err + } + // TODO(dgl): length isn't taken into account here yet either. + if err := remapRootFS(ctx, mounts, hostUID, hostGID); err != nil { + snapshotter.Remove(ctx, usernsID+"-remap") + return "", err + } + if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil { + return "", err + } + + return usernsID, nil } diff --git a/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go b/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go new file mode 100644 index 0000000000..540bcb3130 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + + "github.com/containerd/containerd/snapshots" +) + +func resolveSnapshotOptions(ctx context.Context, client *Client, snapshotterName string, snapshotter snapshots.Snapshotter, parent string, opts ...snapshots.Opt) (string, error) { + return parent, nil +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go new file mode 100644 index 0000000000..a59edabeb6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.Create +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index 805a7a736f..333e85ceb4 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -21,11 +20,6 @@ package sys import "os" -// ForceRemoveAll on unix is just a wrapper for os.RemoveAll -func ForceRemoveAll(path string) error { - return os.RemoveAll(path) -} - // MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. func MkdirAllWithACL(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 87ebacc200..67fc4048c1 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -17,42 +17,44 @@ package sys import ( - "fmt" "os" - "path/filepath" "regexp" - "sort" - "strconv" - "strings" "syscall" "unsafe" - "github.com/Microsoft/hcsshim" "golang.org/x/sys/windows" ) -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) +// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System. +const SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// volumePath is a regular expression to check if a path is a Windows +// volume path (e.g., "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}" +// or "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}\"). 
+var volumePath = regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}\\?$`) + +// MkdirAllWithACL is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and to create a directory +// an appropriate SDDL defined ACL for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, _ os.FileMode) error { + sa, err := makeSecurityAttributes(SddlAdministratorsLocalSystem) + if err != nil { + return &os.PathError{Op: "mkdirall", Path: path, Err: err} + } + return mkdirall(path, sa) } -// MkdirAll implementation that is volume path aware for Windows. It can be used -// as a drop-in replacement for os.MkdirAll() +// MkdirAll is a custom version of os.MkdirAll that is volume path aware for +// Windows. It can be used as a drop-in replacement for os.MkdirAll. func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) + return mkdirall(path, nil) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { +func mkdirall(path string, perm *windows.SecurityAttributes) error { + if volumePath.MatchString(path) { return nil } @@ -65,11 +67,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if dir.IsDir() { return nil } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } // Slow path: make sure parent exists and then call Mkdir for path. @@ -84,20 +82,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { } if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], adminAndLocalSystem) + // Create parent. + err = mkdirall(fixRootDirectory(path[:j-1]), perm) if err != nil { return err } } - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) - } else { - err = os.Mkdir(path, 0) - } - + // Parent now exists; invoke Mkdir and use its result. + err = mkdirWithACL(path, perm) if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. @@ -117,229 +110,42 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. -func mkdirWithACL(name string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} +func mkdirWithACL(name string, sa *windows.SecurityAttributes) error { + if sa == nil { + return os.Mkdir(name, 0) } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = sd namep, err := windows.UTF16PtrFromString(name) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} + err = windows.CreateDirectory(namep, sa) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} } return nil } -// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false +// fixRootDirectory fixes a reference to a drive's root directory to +// have the required trailing slash. +func fixRootDirectory(p string) string { + if len(p) == len(`\\?\c:`) { + if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { + return p + `\` } } - return true + return p } -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { +func makeSecurityAttributes(sddl string) (*windows.SecurityAttributes, error) { var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) + var err error + sa.SecurityDescriptor, err = windows.SecurityDescriptorFromString(sddl) if err != nil { - return windows.InvalidHandle, err + return nil, err } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows -// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, -// when passed a containerd root data directory (i.e. the `--root` directory for containerd). -func ForceRemoveAll(path string) error { - // snapshots/windows/windows.go init() - const snapshotPlugin = "io.containerd.snapshotter.v1" + "." 
+ "windows" - // snapshots/windows/windows.go NewSnapshotter() - snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") - if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { - if err := cleanupWCOWLayers(snapshotDir); err != nil { - return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) - } - } - - return os.RemoveAll(path) -} - -func cleanupWCOWLayers(root string) error { - // See snapshots/windows/windows.go getSnapshotDir() - var layerNums []int - var rmLayerNums []int - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if path != root && info.IsDir() { - name := filepath.Base(path) - if strings.HasPrefix(name, "rm-") { - layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) - if err != nil { - return err - } - rmLayerNums = append(rmLayerNums, layerNum) - } else { - layerNum, err := strconv.Atoi(name) - if err != nil { - return err - } - layerNums = append(layerNums, layerNum) - } - return filepath.SkipDir - } - - return nil - }); err != nil { - return err - } - - sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) - for _, rmLayerNum := range rmLayerNums { - if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { - return err - } - } - - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) - for _, layerNum := range layerNums { - if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { - return err - } - } - - return nil -} - -func cleanupWCOWLayer(layerPath string) error { - info := hcsshim.DriverInfo{ - HomeDir: filepath.Dir(layerPath), - } - - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. - // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. - if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { - return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) - } - } - - if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) - } - - if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to destroy %s: %w", layerPath, err) - } - - return nil + return &sa, nil } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index fa0db5a10e..f579774663 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 6c4f13b908..8172f224e8 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. 
@@ -91,7 +90,7 @@ type Monitor struct { subscribers map[chan runc.Exit]*subscriber } -// Start starts the command a registers the process with the reaper +// Start starts the command and registers the process with the reaper func (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) { ec := m.Subscribe() if err := c.Start(); err != nil { diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index 367e19cad8..5ecbeddc91 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index ef8cd44942..9667a1cf5a 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -38,11 +38,12 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/protobuf" + google_protobuf "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/containerd/typeurl" - google_protobuf "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" digest "github.com/opencontainers/go-digest" is "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -269,7 +270,7 @@ func (t *task) Status(ctx context.Context) (Status, error) { return Status{ Status: ProcessStatus(strings.ToLower(r.Process.Status.String())), ExitStatus: r.Process.ExitStatus, - ExitTime: r.Process.ExitedAt, + ExitTime: protobuf.FromTimestamp(r.Process.ExitedAt), }, nil } @@ -289,7 +290,7 @@ func (t *task) Wait(ctx context.Context) (<-chan ExitStatus, error) { } c <- ExitStatus{ code: r.ExitStatus, - exitedAt: r.ExitedAt, + exitedAt: protobuf.FromTimestamp(r.ExitedAt), } }() return c, nil @@ -348,7 +349,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat if t.io != nil { t.io.Close() } - return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil + return &ExitStatus{code: r.ExitStatus, exitedAt: protobuf.FromTimestamp(r.ExitedAt)}, nil } func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creator) (_ Process, err error) { @@ -365,7 +366,7 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat i.Close() } }() - any, err := typeurl.MarshalAny(spec) + any, err := protobuf.MarshalAnyToProto(spec) if err != nil { return nil, err } @@ -463,9 +464,9 @@ func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Imag if i.Name == "" { i.Name = fmt.Sprintf(checkpointNameFormat, t.id, time.Now().Format(checkpointDateFormat)) } - request.ParentCheckpoint = i.ParentCheckpoint + request.ParentCheckpoint = i.ParentCheckpoint.String() if i.Options != nil { - any, err := typeurl.MarshalAny(i.Options) + any, err := protobuf.MarshalAnyToProto(i.Options) if err != nil { return nil, err } @@ -554,7 +555,7 @@ func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error { if err != nil { return err } - request.Resources = any + request.Resources = protobuf.FromAny(any) } if i.Annotations != nil { 
request.Annotations = i.Annotations @@ -622,8 +623,8 @@ func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tas for _, d := range response.Descriptors { index.Manifests = append(index.Manifests, v1.Descriptor{ MediaType: d.MediaType, - Size: d.Size_, - Digest: d.Digest, + Size: d.Size, + Digest: digest.Digest(d.Digest), Platform: &v1.Platform{ OS: goruntime.GOOS, Architecture: goruntime.GOARCH, diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go index 67e6527325..da269016e4 100644 --- a/vendor/github.com/containerd/containerd/task_opts.go +++ b/vendor/github.com/containerd/containerd/task_opts.go @@ -69,8 +69,8 @@ func WithTaskCheckpoint(im Image) NewTaskOpts { if m.MediaType == images.MediaTypeContainerd1Checkpoint { info.Checkpoint = &types.Descriptor{ MediaType: m.MediaType, - Size_: m.Size, - Digest: m.Digest, + Size: m.Size, + Digest: m.Digest.String(), Annotations: m.Annotations, } return nil diff --git a/vendor/github.com/containerd/containerd/task_opts_unix.go b/vendor/github.com/containerd/containerd/task_opts_unix.go index 1d5983b629..5e5e66a6f8 100644 --- a/vendor/github.com/containerd/containerd/task_opts_unix.go +++ b/vendor/github.com/containerd/containerd/task_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/tracing/helpers.go b/vendor/github.com/containerd/containerd/tracing/helpers.go new file mode 100644 index 0000000000..981da6c795 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/helpers.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "encoding/json" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +const ( + spanDelimiter = "." 
+) + +func makeSpanName(names ...string) string { + return strings.Join(names, spanDelimiter) +} + +func any(k string, v interface{}) attribute.KeyValue { + if v == nil { + return attribute.String(k, "") + } + + switch typed := v.(type) { + case bool: + return attribute.Bool(k, typed) + case []bool: + return attribute.BoolSlice(k, typed) + case int: + return attribute.Int(k, typed) + case []int: + return attribute.IntSlice(k, typed) + case int8: + return attribute.Int(k, int(typed)) + case []int8: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int16: + return attribute.Int(k, int(typed)) + case []int16: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int32: + return attribute.Int64(k, int64(typed)) + case []int32: + ls := make([]int64, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int64(i)) + } + return attribute.Int64Slice(k, ls) + case int64: + return attribute.Int64(k, typed) + case []int64: + return attribute.Int64Slice(k, typed) + case float64: + return attribute.Float64(k, typed) + case []float64: + return attribute.Float64Slice(k, typed) + case string: + return attribute.String(k, typed) + case []string: + return attribute.StringSlice(k, typed) + } + + if stringer, ok := v.(fmt.Stringer); ok { + return attribute.String(k, stringer.String()) + } + if b, err := json.Marshal(v); b != nil && err == nil { + return attribute.String(k, string(b)) + } + return attribute.String(k, fmt.Sprintf("%v", v)) +} diff --git a/vendor/github.com/containerd/containerd/tracing/log.go b/vendor/github.com/containerd/containerd/tracing/log.go index 6c6dd6d7e6..98fa16f931 100644 --- a/vendor/github.com/containerd/containerd/tracing/log.go +++ b/vendor/github.com/containerd/containerd/tracing/log.go @@ -17,9 +17,6 @@ package tracing import ( - "encoding/json" - "fmt" - "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -67,64 +64,3 @@ func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue { } return attrs } - -func any(k string, v interface{}) attribute.KeyValue { - if v == nil { - return attribute.String(k, "") - } - - switch typed := v.(type) { - case bool: - return attribute.Bool(k, typed) - case []bool: - return attribute.BoolSlice(k, typed) - case int: - return attribute.Int(k, typed) - case []int: - return attribute.IntSlice(k, typed) - case int8: - return attribute.Int(k, int(typed)) - case []int8: - ls := make([]int, 0, len(typed)) - for _, i := range typed { - ls = append(ls, int(i)) - } - return attribute.IntSlice(k, ls) - case int16: - return attribute.Int(k, int(typed)) - case []int16: - ls := make([]int, 0, len(typed)) - for _, i := range typed { - ls = append(ls, int(i)) - } - return attribute.IntSlice(k, ls) - case int32: - return attribute.Int64(k, int64(typed)) - case []int32: - ls := make([]int64, 0, len(typed)) - for _, i := range typed { - ls = append(ls, int64(i)) - } - return attribute.Int64Slice(k, ls) - case int64: - return attribute.Int64(k, typed) - case []int64: - return attribute.Int64Slice(k, typed) - case float64: - return attribute.Float64(k, typed) - case []float64: - return attribute.Float64Slice(k, typed) - case string: - return attribute.String(k, typed) - case []string: - return attribute.StringSlice(k, typed) - } - - if stringer, ok := v.(fmt.Stringer); ok { - return attribute.String(k, stringer.String()) - } - if b, err := 
json.Marshal(v); b != nil && err == nil { - return attribute.String(k, string(b)) - } - return attribute.String(k, fmt.Sprintf("%v", v)) -} diff --git a/vendor/github.com/containerd/containerd/tracing/tracing.go b/vendor/github.com/containerd/containerd/tracing/tracing.go index d3ecfb5f9b..7fe7bfd5bf 100644 --- a/vendor/github.com/containerd/containerd/tracing/tracing.go +++ b/vendor/github.com/containerd/containerd/tracing/tracing.go @@ -18,20 +18,100 @@ package tracing import ( "context" + "net/http" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + httpconv "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" "go.opentelemetry.io/otel/trace" ) -// StartSpan starts child span in a context. -func StartSpan(ctx context.Context, opName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { - return parent.TracerProvider().Tracer("").Start(ctx, opName, opts...) - } - return otel.Tracer("").Start(ctx, opName, opts...) +// StartConfig defines configuration for a new span object. +type StartConfig struct { + spanOpts []trace.SpanStartOption } -// StopSpan ends the span specified -func StopSpan(span trace.Span) { - span.End() +type SpanOpt func(config *StartConfig) + +// WithHTTPRequest marks span as a HTTP request operation from client to server. +// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type. +func WithHTTPRequest(request *http.Request) SpanOpt { + return func(config *StartConfig) { + config.spanOpts = append(config.spanOpts, + trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server + trace.WithAttributes(httpconv.ClientRequest(request)...), // Add HTTP attributes + ) + } +} + +// StartSpan starts child span in a context. +func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) { + config := StartConfig{} + for _, fn := range opts { + fn(&config) + } + tracer := otel.Tracer("") + if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { + tracer = parent.TracerProvider().Tracer("") + } + ctx, span := tracer.Start(ctx, opName, config.spanOpts...) + return ctx, &Span{otelSpan: span} +} + +// SpanFromContext returns the current Span from the context. +func SpanFromContext(ctx context.Context) *Span { + return &Span{ + otelSpan: trace.SpanFromContext(ctx), + } +} + +// Span is wrapper around otel trace.Span. +// Span is the individual component of a trace. It represents a +// single named and timed operation of a workflow that is traced. +type Span struct { + otelSpan trace.Span +} + +// End completes the span. +func (s *Span) End() { + s.otelSpan.End() +} + +// AddEvent adds an event with provided name and options. +func (s *Span) AddEvent(name string, options ...trace.EventOption) { + s.otelSpan.AddEvent(name, options...) +} + +// SetStatus sets the status of the current span. +// If an error is encountered, it records the error and sets span status to Error. +func (s *Span) SetStatus(err error) { + if err != nil { + s.otelSpan.RecordError(err) + s.otelSpan.SetStatus(codes.Error, err.Error()) + } else { + s.otelSpan.SetStatus(codes.Ok, "") + } +} + +// SetAttributes sets kv as attributes of the span. +func (s *Span) SetAttributes(kv ...attribute.KeyValue) { + s.otelSpan.SetAttributes(kv...) 
+} + +// Name sets the span name by joining a list of strings in dot separated format. +func Name(names ...string) string { + return makeSpanName(names...) +} + +// Attribute takes a key value pair and returns attribute.KeyValue type. +func Attribute(k string, v interface{}) attribute.KeyValue { + return any(k, v) +} + +// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry +// specification for a span. +func HTTPStatusCodeAttributes(code int) []attribute.KeyValue { + return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)} } diff --git a/vendor/github.com/containerd/containerd/transfer.go b/vendor/github.com/containerd/containerd/transfer.go new file mode 100644 index 0000000000..9979aa75bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/transfer.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "errors" + "io" + + streamingapi "github.com/containerd/containerd/api/services/streaming/v1" + transferapi "github.com/containerd/containerd/api/services/transfer/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/containerd/pkg/transfer" + "github.com/containerd/containerd/pkg/transfer/proxy" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/typeurl/v2" +) + +func (c *Client) Transfer(ctx context.Context, src interface{}, dest interface{}, opts ...transfer.Opt) error { + ctx, done, err := c.WithLease(ctx) + if err != nil { + return err + } + defer done(ctx) + + return proxy.NewTransferrer(transferapi.NewTransferClient(c.conn), c.streamCreator()).Transfer(ctx, src, dest, opts...) 
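The tracing.go hunk above replaces the bare OpenTelemetry helpers with a small Span wrapper and an option-based StartSpan. A short sketch of how a caller might drive the new surface; the function and option names come from that hunk, while the surrounding HTTP client code is illustrative only:

    package example

    import (
        "context"
        "net/http"

        "github.com/containerd/containerd/tracing"
    )

    func fetch(ctx context.Context, url string) error {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
        if err != nil {
            return err
        }

        // Name joins the parts with the package's span delimiter (".");
        // WithHTTPRequest marks the span as a client request and copies the
        // request attributes onto it.
        ctx, span := tracing.StartSpan(ctx, tracing.Name("remotes", "fetch"),
            tracing.WithHTTPRequest(req))
        defer span.End()

        resp, err := http.DefaultClient.Do(req.WithContext(ctx))
        if err != nil {
            // SetStatus records the error and marks the span as Error.
            span.SetStatus(err)
            return err
        }
        defer resp.Body.Close()

        span.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...)
        span.SetStatus(nil) // a nil error marks the span Ok
        return nil
    }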
+} + +func (c *Client) streamCreator() streaming.StreamCreator { + return &streamCreator{ + client: streamingapi.NewStreamingClient(c.conn), + } +} + +type streamCreator struct { + client streamingapi.StreamingClient +} + +func (sc *streamCreator) Create(ctx context.Context, id string) (streaming.Stream, error) { + stream, err := sc.client.Stream(ctx) + if err != nil { + return nil, err + } + + a, err := typeurl.MarshalAny(&streamingapi.StreamInit{ + ID: id, + }) + if err != nil { + return nil, err + } + err = stream.Send(protobuf.FromAny(a)) + if err != nil { + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return nil, err + } + + // Receive an ack that stream is init and ready + if _, err = stream.Recv(); err != nil { + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return nil, err + } + + return &clientStream{ + s: stream, + }, nil +} + +type clientStream struct { + s streamingapi.Streaming_StreamClient +} + +func (cs *clientStream) Send(a typeurl.Any) (err error) { + err = cs.s.Send(protobuf.FromAny(a)) + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return +} + +func (cs *clientStream) Recv() (a typeurl.Any, err error) { + a, err = cs.s.Recv() + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return +} + +func (cs *clientStream) Close() error { + return cs.s.CloseSend() +} diff --git a/vendor/github.com/containerd/containerd/unpacker.go b/vendor/github.com/containerd/containerd/unpacker.go deleted file mode 100644 index 29bd595e16..0000000000 --- a/vendor/github.com/containerd/containerd/unpacker.go +++ /dev/null @@ -1,427 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package containerd - -import ( - "context" - "crypto/rand" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/pkg/kmutex" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/snapshots" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" -) - -const ( - labelSnapshotRef = "containerd.io/snapshot.ref" -) - -type unpacker struct { - updateCh chan ocispec.Descriptor - snapshotter string - config UnpackConfig - c *Client - limiter *semaphore.Weighted -} - -func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) { - snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter) - if err != nil { - return nil, err - } - var config = UnpackConfig{ - DuplicationSuppressor: kmutex.NewNoop(), - } - for _, o := range rCtx.UnpackOpts { - if err := o(ctx, &config); err != nil { - return nil, err - } - } - var limiter *semaphore.Weighted - if rCtx.MaxConcurrentDownloads > 0 { - limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads)) - } - return &unpacker{ - updateCh: make(chan ocispec.Descriptor, 128), - snapshotter: snapshotter, - config: config, - c: c, - limiter: limiter, - }, nil -} - -func (u *unpacker) unpack( - ctx context.Context, - rCtx *RemoteContext, - h images.Handler, - config ocispec.Descriptor, - layers []ocispec.Descriptor, -) error { - p, err := content.ReadBlob(ctx, u.c.ContentStore(), config) - if err != nil { - return err - } - - var i ocispec.Image - if err := json.Unmarshal(p, &i); err != nil { - return fmt.Errorf("unmarshal image config: %w", err) - } - diffIDs := i.RootFS.DiffIDs - if len(layers) != len(diffIDs) { - return fmt.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) - } - - if u.config.CheckPlatformSupported { - imgPlatform := platforms.Normalize(ocispec.Platform{OS: i.OS, Architecture: i.Architecture}) - snapshotterPlatformMatcher, err := u.c.GetSnapshotterSupportedPlatforms(ctx, u.snapshotter) - if err != nil { - return fmt.Errorf("failed to find supported platforms for snapshotter %s: %w", u.snapshotter, err) - } - if !snapshotterPlatformMatcher.Match(imgPlatform) { - return fmt.Errorf("snapshotter %s does not support platform %s for image %s", u.snapshotter, imgPlatform, config.Digest) - } - } - - var ( - sn = u.c.SnapshotService(u.snapshotter) - a = u.c.DiffService() - cs = u.c.ContentStore() - - chain []digest.Digest - - fetchOffset int - fetchC []chan struct{} - fetchErr chan error - ) - - // If there is an early return, ensure any ongoing - // fetches get their context cancelled - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - doUnpackFn := func(i int, desc ocispec.Descriptor) error { - parent := identity.ChainID(chain) - chain = append(chain, diffIDs[i]) - chainID := identity.ChainID(chain).String() - - unlock, err := u.lockSnChainID(ctx, chainID) - if err != nil { - return err - } - defer unlock() - - if _, err := sn.Stat(ctx, chainID); err == nil { - // no need to handle - return nil - } else if !errdefs.IsNotFound(err) { - return 
fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) - } - - // inherits annotations which are provided as snapshot labels. - labels := snapshots.FilterInheritedLabels(desc.Annotations) - if labels == nil { - labels = make(map[string]string) - } - labels[labelSnapshotRef] = chainID - - var ( - key string - mounts []mount.Mount - opts = append(rCtx.SnapshotterOpts, snapshots.WithLabels(labels)) - ) - - for try := 1; try <= 3; try++ { - // Prepare snapshot with from parent, label as root - key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID) - mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) - if err != nil { - if errdefs.IsAlreadyExists(err) { - if _, err := sn.Stat(ctx, chainID); err != nil { - if !errdefs.IsNotFound(err) { - return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) - } - // Try again, this should be rare, log it - log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") - } else { - // no need to handle, snapshot now found with chain id - return nil - } - } else { - return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) - } - } else { - break - } - } - if err != nil { - return fmt.Errorf("unable to prepare extraction snapshot: %w", err) - } - - // Abort the snapshot if commit does not happen - abort := func() { - if err := sn.Remove(ctx, key); err != nil { - log.G(ctx).WithError(err).Errorf("failed to cleanup %q", key) - } - } - - if fetchErr == nil { - fetchErr = make(chan error, 1) - fetchOffset = i - fetchC = make([]chan struct{}, len(layers)-fetchOffset) - for i := range fetchC { - fetchC[i] = make(chan struct{}) - } - - go func(i int) { - err := u.fetch(ctx, h, layers[i:], fetchC) - if err != nil { - fetchErr <- err - } - close(fetchErr) - }(i) - } - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-fetchErr: - if err != nil { - return err - } - case <-fetchC[i-fetchOffset]: - } - - diff, err := a.Apply(ctx, desc, mounts, u.config.ApplyOpts...) - if err != nil { - abort() - return fmt.Errorf("failed to extract layer %s: %w", diffIDs[i], err) - } - if diff.Digest != diffIDs[i] { - abort() - return fmt.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) - } - - if err = sn.Commit(ctx, chainID, key, opts...); err != nil { - abort() - if errdefs.IsAlreadyExists(err) { - return nil - } - return fmt.Errorf("failed to commit snapshot %s: %w", key, err) - } - - // Set the uncompressed label after the uncompressed - // digest has been verified through apply. 
- cinfo := content.Info{ - Digest: desc.Digest, - Labels: map[string]string{ - "containerd.io/uncompressed": diff.Digest.String(), - }, - } - if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { - return err - } - return nil - } - - for i, desc := range layers { - if err := doUnpackFn(i, desc); err != nil { - return err - } - } - - chainID := identity.ChainID(chain).String() - cinfo := content.Info{ - Digest: config.Digest, - Labels: map[string]string{ - fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID, - }, - } - _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter)) - if err != nil { - return err - } - log.G(ctx).WithFields(logrus.Fields{ - "config": config.Digest, - "chainID": chainID, - }).Debug("image unpacked") - - return nil -} - -func (u *unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec.Descriptor, done []chan struct{}) error { - eg, ctx2 := errgroup.WithContext(ctx) - for i, desc := range layers { - desc := desc - i := i - - if err := u.acquire(ctx); err != nil { - return err - } - - eg.Go(func() error { - unlock, err := u.lockBlobDescriptor(ctx2, desc) - if err != nil { - u.release() - return err - } - - _, err = h.Handle(ctx2, desc) - - unlock() - u.release() - - if err != nil && !errors.Is(err, images.ErrSkipDesc) { - return err - } - close(done[i]) - - return nil - }) - } - - return eg.Wait() -} - -func (u *unpacker) handlerWrapper( - uctx context.Context, - rCtx *RemoteContext, - unpacks *int32, -) (func(images.Handler) images.Handler, *errgroup.Group) { - eg, uctx := errgroup.WithContext(uctx) - return func(f images.Handler) images.Handler { - var ( - lock sync.Mutex - layers = map[digest.Digest][]ocispec.Descriptor{} - ) - return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - unlock, err := u.lockBlobDescriptor(ctx, desc) - if err != nil { - return nil, err - } - - children, err := f.Handle(ctx, desc) - unlock() - if err != nil { - return children, err - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - var nonLayers []ocispec.Descriptor - var manifestLayers []ocispec.Descriptor - - // Split layers from non-layers, layers will be handled after - // the config - for _, child := range children { - if images.IsLayerType(child.MediaType) { - manifestLayers = append(manifestLayers, child) - } else { - nonLayers = append(nonLayers, child) - } - } - - lock.Lock() - for _, nl := range nonLayers { - layers[nl.Digest] = manifestLayers - } - lock.Unlock() - - children = nonLayers - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - lock.Lock() - l := layers[desc.Digest] - lock.Unlock() - if len(l) > 0 { - atomic.AddInt32(unpacks, 1) - eg.Go(func() error { - return u.unpack(uctx, rCtx, f, desc, l) - }) - } - } - return children, nil - }) - }, eg -} - -func (u *unpacker) acquire(ctx context.Context) error { - if u.limiter == nil { - return nil - } - return u.limiter.Acquire(ctx, 1) -} - -func (u *unpacker) release() { - if u.limiter == nil { - return - } - u.limiter.Release(1) -} - -func (u *unpacker) lockSnChainID(ctx context.Context, chainID string) (func(), error) { - key := u.makeChainIDKeyWithSnapshotter(chainID) - - if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { - return nil, err - } - return func() { - u.config.DuplicationSuppressor.Unlock(key) - }, nil -} - -func (u *unpacker) 
lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { - key := u.makeBlobDescriptorKey(desc) - - if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { - return nil, err - } - return func() { - u.config.DuplicationSuppressor.Unlock(key) - }, nil -} - -func (u *unpacker) makeChainIDKeyWithSnapshotter(chainID string) string { - return fmt.Sprintf("sn://%s/%v", u.snapshotter, chainID) -} - -func (u *unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { - return fmt.Sprintf("blob://%v", desc.Digest) -} - -func uniquePart() string { - t := time.Now() - var b [3]byte - // Ignore read failures, just decreases uniqueness - rand.Read(b[:]) - return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b436ed197d..350f81eace 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.6.24+unknown" + Version = "1.7.6+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go deleted file mode 100644 index ffde8a6508..0000000000 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package descriptor provides functions for obtaining the protocol buffer -// descriptors of generated Go types. -// -// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package -// for how to obtain an EnumDescriptor or MessageDescriptor in order to -// programatically interact with the protobuf type system. -package descriptor - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "sync" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" - - descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -// Message is proto.Message with a method to return its descriptor. -// -// Deprecated: The Descriptor method may not be generated by future -// versions of protoc-gen-go, meaning that this interface may not -// be implemented by many concrete message types. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns the file descriptor proto containing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -// -// Deprecated: Not all concrete message types satisfy the Message interface. -// Use MessageDescriptorProto instead. If possible, the calling code should -// be rewritten to use protobuf reflection instead. -// See package "google.golang.org/protobuf/reflect/protoreflect" for details. 
-func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - return MessageDescriptorProto(m) -} - -type rawDesc struct { - fileDesc []byte - indexes []int -} - -var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc - -func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { - // Fast-path: check whether raw descriptors are already cached. - origDesc := d - if v, ok := rawDescCache.Load(origDesc); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - - // Slow-path: derive the raw descriptor from the v2 descriptor. - - // Start with the leaf (a given enum or message declaration) and - // ascend upwards until we hit the parent file descriptor. - var idxs []int - for { - idxs = append(idxs, d.Index()) - d = d.Parent() - if d == nil { - // TODO: We could construct a FileDescriptor stub for standalone - // descriptors to satisfy the API. - return nil, nil - } - if _, ok := d.(protoreflect.FileDescriptor); ok { - break - } - } - - // Obtain the raw file descriptor. - fd := d.(protoreflect.FileDescriptor) - b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) - file := protoimpl.X.CompressGZIP(b) - - // Reverse the indexes, since we populated it in reverse. - for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { - idxs[i], idxs[j] = idxs[j], idxs[i] - } - - if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - return file, idxs -} - -// EnumRawDescriptor returns the GZIP'd raw file descriptor representing -// the enum and the index path to reach the enum declaration. -// The returned slices must not be mutated. -func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { - if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { - return ev.EnumDescriptor() - } - ed := protoimpl.X.EnumTypeOf(e) - return deriveRawDescriptor(ed.Descriptor()) -} - -// MessageRawDescriptor returns the GZIP'd raw file descriptor representing -// the message and the index path to reach the message declaration. -// The returned slices must not be mutated. -func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { - if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { - return mv.Descriptor() - } - md := protoimpl.X.MessageTypeOf(m) - return deriveRawDescriptor(md.Descriptor()) -} - -var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto - -func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { - // Fast-path: check whether descriptor protos are already cached. - if v, ok := fileDescCache.Load(&rawDesc[0]); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - - // Slow-path: derive the descriptor proto from the GZIP'd message. - zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) - if err != nil { - panic(err) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(err) - } - fd := new(descriptorpb.FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - panic(err) - } - if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - return fd -} - -// EnumDescriptorProto returns the file descriptor proto representing -// the enum and the enum descriptor proto for the enum itself. -// The returned proto messages must not be mutated. 
-func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { - rawDesc, idxs := EnumRawDescriptor(e) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - if len(idxs) == 1 { - return fd, fd.EnumType[idxs[0]] - } - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1 : len(idxs)-1] { - md = md.NestedType[i] - } - ed := md.EnumType[idxs[len(idxs)-1]] - return fd, ed -} - -// MessageDescriptorProto returns the file descriptor proto representing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - rawDesc, idxs := MessageRawDescriptor(m) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1:] { - md = md.NestedType[i] - } - return fd, md -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 5242751fb2..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["errors.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go deleted file mode 100644 index 61101d7177..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go +++ /dev/null @@ -1,189 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/errors.proto - -package internal - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Error is the generic error returned from unary RPCs. -type Error struct { - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. 
- Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{0} -} - -func (m *Error) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Error.Unmarshal(m, b) -} -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Error.Marshal(b, m, deterministic) -} -func (m *Error) XXX_Merge(src proto.Message) { - xxx_messageInfo_Error.Merge(m, src) -} -func (m *Error) XXX_Size() int { - return xxx_messageInfo_Error.Size(m) -} -func (m *Error) XXX_DiscardUnknown() { - xxx_messageInfo_Error.DiscardUnknown(m) -} - -var xxx_messageInfo_Error proto.InternalMessageInfo - -func (m *Error) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *Error) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Error) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Error) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{1} -} - -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (m *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(m, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return 
m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } - -var fileDescriptor_9b093362ca6d1e03 = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, - 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, - 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, - 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, - 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, - 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, - 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, - 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, - 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, - 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, - 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, - 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, - 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, - 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, - 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, - 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto deleted file mode 100644 index 4fb212c6b6..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// Error is the generic error returned from unary RPCs. -message Error { - string error = 1; - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - int32 code = 2; - string message = 3; - repeated google.protobuf.Any details = 4; -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index 58b72b9cf7..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,85 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_httpbodyproto.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//descriptor:go_default_library_gen", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "convert_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_httpbodyproto_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//internal:go_default_library", - "//runtime/internal/examplepb:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index b2ce743bdd..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,186 +0,0 @@ 
-package runtime - -import ( - "context" - "io" - "net/http" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusGatewayTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusTooManyRequests - case codes.FailedPrecondition: - // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. - return http.StatusBadRequest - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Infof("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with an error. - // - // HTTPError is called: - // - From generated per-endpoint gateway handler code, when calling the backend results in an error. - // - From gateway runtime code, when forwarding the response message results in an error. - // - // The default value for HTTPError calls the custom error handler configured on the ServeMux via the - // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise. - // - // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler - // serve option. - // - // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve - // option, set GlobalHTTPErrorHandler to a custom function. - // - // Setting this variable directly to customize error format is deprecated. - HTTPError = MuxOrGlobalHTTPError - - // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the - // WithProtoErrorHandler serve option. - // - // You can set a custom function to this variable to customize error format. - GlobalHTTPErrorHandler = DefaultHTTPError - - // OtherErrorHandler handles gateway errors from parsing and routing client requests for all - // ServeMux instances not using the WithProtoErrorHandler serve option. - // - // It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest - // - // To customize parsing and routing error handling of a particular ServeMux instance, use the - // WithProtoErrorHandler serve option. - // - // To customize parsing and routing error handling of all ServeMux instances not using the - // WithProtoErrorHandler serve option, set a custom function to this variable. 
- OtherErrorHandler = DefaultOtherErrorHandler -) - -// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalErrorHandler. -func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { - if mux.protoErrorHandler != nil { - mux.protoErrorHandler(ctx, mux, marshaler, w, r, err) - } else { - GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err) - } -} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). -func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - w.Header().Del("Transfer-Encoding") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - pb := s.Proto() - contentType = typeMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - body := &internal.Error{ - Error: s.Message(), - Message: s.Message(), - Code: int32(s.Code()), - Details: s.Proto().GetDetails(), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 - // Unless the request includes a TE header field indicating "trailers" - // is acceptable, as described in Section 4.3, a server SHOULD NOT - // generate trailer fields that it believes are necessary for the user - // agent to receive. - var wantsTrailers bool - - if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") { - wantsTrailers = true - handleForwardResponseTrailerHeader(w, md) - w.Header().Set("Transfer-Encoding", "chunked") - } - - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - if wantsTrailers { - handleForwardResponseTrailer(w, md) - } -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". 
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index aef645e40b..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,89 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - descriptor2 "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "google.golang.org/genproto/protobuf/field_mask" -) - -func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { - // TODO - should really gate this with a test that the marshaller has used json names - if md != nil { - for _, f := range md.Field { - if f.JsonName != nil && f.Name != nil && *f.JsonName == name { - var subType *descriptor.DescriptorProto - - // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. - if f.TypeName != nil { - typeSplit := strings.Split(*f.TypeName, ".") - typeName := typeSplit[len(typeSplit)-1] - for _, t := range md.NestedType { - if typeName == *t.Name { - subType = t - } - } - } - return *f.Name, subType - } - } - } - return name, nil -} - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. -func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root, md: md}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - protoName, subMd := translateName(k, item.md) - if subMsg, ok := v.(descriptor2.Message); ok { - _, subMd = descriptor2.ForMessage(subMsg) - } - - var path string - if item.path == "" { - path = protoName - } else { - path = item.path + "." + protoName - } - queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd}) - } - } else if len(item.path) > 0 { - // otherwise, it's a leaf node so print its path - fm.Paths = append(fm.Paths, item.path) - } - } - - return fm, nil -} - -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask -type fieldMaskPathItem struct { - // the list of prior fields leading up to node connected by dots - path string - - // a generic decoded json object the current item to inspect for further path extraction - node interface{} - - // descriptor for parent message - md *descriptor.DescriptorProto -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go deleted file mode 100644 index 523a9cb43c..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ /dev/null @@ -1,300 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// A HandlerFunc handles a specific pair of path pattern and HTTP method. 
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) - -// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when -// a request is received with a URI path that does not match any registered -// service method. -// -// Since gRPC servers return an "Unimplemented" code for requests with an -// unrecognized URI path, this error also has a gRPC "Unimplemented" code. -var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - -// ServeMux is a request multiplexer for grpc-gateway. -// It matches http requests to patterns and invokes the corresponding handler. -type ServeMux struct { - // handlers maps HTTP method to a list of handlers. - handlers map[string][]handler - forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error - marshalers marshalerRegistry - incomingHeaderMatcher HeaderMatcherFunc - outgoingHeaderMatcher HeaderMatcherFunc - metadataAnnotators []func(context.Context, *http.Request) metadata.MD - streamErrorHandler StreamErrorHandlerFunc - protoErrorHandler ProtoErrorHandlerFunc - disablePathLengthFallback bool - lastMatchWins bool -} - -// ServeMuxOption is an option that can be given to a ServeMux on construction. -type ServeMuxOption func(*ServeMux) - -// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. -// -// forwardResponseOption is an option that will be called on the relevant context.Context, -// http.ResponseWriter, and proto.Message before every forwarded response. -// -// The message may be nil in the case where just a header is being sent. -func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) - } -} - -// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. -// Configuring this will mean the generated swagger output is no longer correct, and it should be -// done with careful consideration. -func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { - return func(serveMux *ServeMux) { - currentQueryParser = queryParameterParser - } -} - -// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. -type HeaderMatcherFunc func(string) (string, bool) - -// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header -// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with -// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. -func DefaultHeaderMatcher(key string) (string, bool) { - key = textproto.CanonicalMIMEHeaderKey(key) - if isPermanentHTTPHeader(key) { - return MetadataPrefix + key, true - } else if strings.HasPrefix(key, MetadataHeaderPrefix) { - return key[len(MetadataHeaderPrefix):], true - } - return "", false -} - -// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. -// -// This matcher will be called with each header in http.Request. If matcher returns true, that header will be -// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. 
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.incomingHeaderMatcher = fn - } -} - -// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. -// -// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be -// passed to http response returned from gateway. To transform the header before passing to response, -// matcher should return modified header. -func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.outgoingHeaderMatcher = fn - } -} - -// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used by services that need to read from http.Request and modify gRPC context. A common use case -// is reading token from cookie and adding it in gRPC context. -func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) - } -} - -// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler. -// -// This can be used to handle an error as general proto message defined by gRPC. -// When this option is used, the mux uses the configured error handler instead of HTTPError and -// OtherErrorHandler. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - -// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. -func WithDisablePathLengthFallback() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.disablePathLengthFallback = true - } -} - -// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream -// error handler, which allows for customizing the error trailer for server-streaming -// calls. -// -// For stream errors that occur before any response has been written, the mux's -// ProtoErrorHandler will be invoked. However, once data has been written, the errors must -// be handled differently: they must be included in the response body. The response body's -// final message will include the error details returned by the stream error handler. -func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.streamErrorHandler = fn - } -} - -// WithLastMatchWins returns a ServeMuxOption that will enable "last -// match wins" behavior, where if multiple path patterns match a -// request path, the last one defined in the .proto file will be used. -func WithLastMatchWins() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.lastMatchWins = true - } -} - -// NewServeMux returns a new ServeMux whose internal mapping is empty. 
-func NewServeMux(opts ...ServeMuxOption) *ServeMux { - serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - streamErrorHandler: DefaultHTTPStreamErrorHandler, - } - - for _, opt := range opts { - opt(serveMux) - } - - if serveMux.incomingHeaderMatcher == nil { - serveMux.incomingHeaderMatcher = DefaultHeaderMatcher - } - - if serveMux.outgoingHeaderMatcher == nil { - serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { - return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true - } - } - - return serveMux -} - -// Handle associates "h" to the pair of HTTP method and path pattern. -func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - if s.lastMatchWins { - s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) - } else { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) - } -} - -// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. -func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. 
- if s.isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - h.h(w, r, pathParams) - return - } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } - return - } - } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. -func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { - return s.forwardResponseOptions -} - -func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { - return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" -} - -type handler struct { - pat Pattern - h HandlerFunc -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index 3fd30da22a..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,106 +0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a -// a proto struct used to represent error at the end of a stream. -type StreamErrorHandlerFunc func(context.Context, error) *StreamError - -// StreamError is the payload for the final message in a server stream in the event that the server returns an -// error after a response message has already been sent. -type StreamError internal.StreamError - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - pb := s.Proto() - contentType = typeMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via -// default logic. -// -// It extracts the gRPC status from err if possible. The fields of the status are -// used to populate the returned StreamError, and the HTTP status code is derived -// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a -// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
-func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return &StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index ba66842c33..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,406 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") - -var currentQueryParser QueryParameterParser = &defaultQueryParser{} - -// QueryParameterParser defines interface for all query parameter parsers -type QueryParameterParser interface { - Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error -} - -// PopulateQueryParameters parses query parameters -// into "msg" using current query parser -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - return currentQueryParser.Parse(msg, values, filter) -} - -type defaultQueryParser struct{} - -// Parse populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - match := valuesKeyRegexp.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - for _, op := range props.OneofTypes { - if name == op.Prop.OrigName || name == op.Prop.JSONName { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? - if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - var name string - switch m := i.(type) { - case interface{ XXX_WellKnownType() string }: - name = m.XXX_WellKnownType() - case proto.Message: - const wktPrefix = "google.protobuf." 
- if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { - name = fullName[len(wktPrefix):] - } - } - switch name { - case "Timestamp": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.FieldByName("Seconds").SetInt(int64(t.Unix())) - f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.FieldByName("Seconds").SetInt(s) - f.FieldByName("Nanos").SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.FieldByName("Value").SetBool(true) - } else if value == "false" { - f.FieldByName("Value").SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.FieldByName("Value").SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.FieldByName("Value").SetBytes(bytesVal) - return nil - case "FieldMask": - p := f.FieldByName("Paths") - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79323..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 0000000000..f694f3c0d0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + 
+package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@com_github_golang_glog//:glog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 0000000000..3cd9372959 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. + Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. + num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) + } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) 
+ } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 0000000000..138f7c12f0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + _, err := Parse(string(data)) + if err != nil { + return 0 + } + return 0 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 0000000000..f008f7cc94 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,369 @@ +package httprule + +import ( + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. +type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. 
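// For example, given the template "/v1/{name=*}:foo:bar" the final token is
// ":foo:bar" and the penultimate token is "}", so Index selects the first
// colon and the whole trailing "foo:bar" becomes the verb; LastIndex would
// instead have produced a spurious ":foo" literal with verb "bar".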
+ var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. +func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } + segs, err := p.segments() + if err != nil { + return nil, err + } + if _, err := p.accept(typeEOF); err != nil { + return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, "")) + } + return segs, nil +} + +func (p *parser) segments() ([]segment, error) { + s, err := p.segment() + if err != nil { + return nil, err + } + + segs := []segment{s} + for { + if _, err := p.accept("/"); err != nil { + return segs, nil + } + s, err := p.segment() + if err != nil { + return segs, err + } + segs = append(segs, s) + } +} + +func (p *parser) segment() (segment, error) { + if _, err := p.accept("*"); err == nil { + return wildcard{}, nil + } + if _, err := p.accept("**"); err == nil { + return deepWildcard{}, nil + } + if l, err := p.literal(); err == nil { + return l, nil + } + + v, err := p.variable() + if err != nil { + return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err) + } + return v, err +} + +func (p *parser) literal() (segment, error) { + lit, err := p.accept(typeLiteral) + if err != nil { + return nil, err + } + return literal(lit), nil +} + +func (p *parser) variable() (segment, error) { + if _, err := p.accept("{"); err != nil { + return nil, err + } + + path, err := p.fieldPath() + if err != nil { + return nil, err + } + + var segs []segment + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { + return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err) + } + } else { + segs = []segment{wildcard{}} + } + + if _, err := p.accept("}"); err != nil { + return nil, fmt.Errorf("unterminated variable segment: %s", path) + } + return variable{ + path: path, + segments: segs, + }, nil +} + +func (p *parser) fieldPath() (string, error) { + c, err := p.accept(typeIdent) + if err != nil { + return "", err + } + components := []string{c} + for { + if _, err = p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { + return "", fmt.Errorf("invalid field path component: %v", err) + } + components = append(components, c) + } +} + +// A termType is a type of terminal symbols. +type termType string + +// These constants define some of valid values of termType. +// They improve readability of parse functions. +// +// You can also use "/", "*", "**", "." or "=" as valid values. +const ( + typeIdent = termType("ident") + typeLiteral = termType("literal") + typeEOF = termType("$") +) + +const ( + // eof is the terminal symbol which always appears at the end of token sequence. 
+ eof = "\u0000" +) + +// accept tries to accept a token in "p". +// This function consumes a token and returns it if it matches to the specified "term". +// If it doesn't match, the function does not consume any tokens and return an error. +func (p *parser) accept(term termType) (string, error) { + t := p.tokens[0] + switch term { + case "/", "*", "**", ".", "=", "{", "}": + if t != string(term) && t != "/" { + return "", fmt.Errorf("expected %q but got %q", term, t) + } + case typeEOF: + if t != eof { + return "", fmt.Errorf("expected EOF but got %q", t) + } + case typeIdent: + if err := expectIdent(t); err != nil { + return "", err + } + case typeLiteral: + if err := expectPChars(t); err != nil { + return "", err + } + default: + return "", fmt.Errorf("unknown termType %q", term) + } + p.tokens = p.tokens[1:] + p.accepted = append(p.accepted, t) + return t, nil +} + +// expectPChars determines if "t" consists of only pchars defined in RFC3986. +// +// https://www.ietf.org/rfc/rfc3986.txt, P.49 +// +// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" +// / "*" / "+" / "," / ";" / "=" +// pct-encoded = "%" HEXDIG HEXDIG +func expectPChars(t string) error { + const ( + init = iota + pct1 + pct2 + ) + st := init + for _, r := range t { + if st != init { + if !isHexDigit(r) { + return fmt.Errorf("invalid hexdigit: %c(%U)", r, r) + } + switch st { + case pct1: + st = pct2 + case pct2: + st = init + } + continue + } + + // unreserved + switch { + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case '0' <= r && r <= '9': + continue + } + switch r { + case '-', '.', '_', '~': + // unreserved + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': + // sub-delims + case ':', '@': + // rest of pchar + case '%': + // pct-encoded + st = pct1 + default: + return fmt.Errorf("invalid character in path segment: %q(%U)", r, r) + } + } + if st != init { + return fmt.Errorf("invalid percent-encoding in %q", t) + } + return nil +} + +// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). 
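// For example, "parent_id" and "_tag2" are accepted, while "2fast" (leading
// digit) and "shelf-id" (contains '-') are rejected.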
+func expectIdent(ident string) error { + if ident == "" { + return fmt.Errorf("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 0000000000..5a814a0004 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 0000000000..b5140a3c9d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/structpb", + 
"@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@go_googleapis//google/rpc:status_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go similarity index 74% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index d8cbd4cc96..9b1b81f529 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -41,6 +41,25 @@ var ( DefaultContextTimeout = 0 * time.Second ) +// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. +// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. +var malformedHTTPHeaders = map[string]struct{}{ + "connection": {}, +} + +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + func decodeBinHeader(v string) ([]byte, error) { if len(v)%4 == 0 { // Input was padded, or padding was not necessary. @@ -56,8 +75,8 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) 
if err != nil { return nil, err } @@ -70,8 +89,8 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con // AnnotateIncomingContext adds context information such as metadata from the request. // Attach metadata as incoming context. -func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) if err != nil { return nil, err } @@ -82,7 +101,11 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque return metadata.NewIncomingContext(ctx, md), nil } -func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { @@ -132,6 +155,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con } if timeout != 0 { + //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { @@ -154,11 +178,17 @@ type serverMetadataKey struct{} // NewServerMetadataContext creates a new context with ServerMetadata func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } return context.WithValue(ctx, serverMetadataKey{}, md) } // ServerMetadataFromContext returns the ServerMetadata in ctx func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) return } @@ -289,3 +319,46 @@ func isPermanentHTTPHeader(hdr string) bool { } return false } + +// isMalformedHTTPHeader checks whether header belongs to the list of +// "malformed headers" and would be rejected by the gRPC server. +func isMalformedHTTPHeader(header string) bool { + _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] + return isMalformed +} + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. +// The format of the returned string is defined by the google.api.http path template type. 
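// For example, a handler registered for the template "/v1/{name=messages/*}"
// reports that literal pattern string here, while RPCMethod above reports the
// matching "/package.service/method" name.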
+func HTTPPathPattern(ctx context.Context) (string, bool) { + m := ctx.Value(httpPathPatternKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { + return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go similarity index 80% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index 2c279344dc..cfa540787f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" ) // String just returns the given string. @@ -205,9 +205,11 @@ func BytesSlice(val, sep string) ([][]byte, error) { } // Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. -func Timestamp(val string) (*timestamp.Timestamp, error) { - var r timestamp.Timestamp - err := jsonpb.UnmarshalString(val, &r) +func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + err := unmarshaler.Unmarshal([]byte(val), &r) if err != nil { return nil, err } @@ -215,9 +217,11 @@ func Timestamp(val string) (*timestamp.Timestamp, error) { } // Duration converts the given string into a timestamp.Duration. 
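// For example, both converters accept the protojson string forms:
//
//	ts, _ := Timestamp("2000-01-01T00:00:00Z") // RFC 3339 timestamp
//	d, _ := Duration("3.5s")                   // 3 seconds, 500000000 nanos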
-func Duration(val string) (*duration.Duration, error) { - var r duration.Duration - err := jsonpb.UnmarshalString(val, &r) +func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + err := unmarshaler.Unmarshal([]byte(val), &r) if err != nil { return nil, err } @@ -261,58 +265,58 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { } /* - Support fot google.protobuf.wrappers on top of primitive types + Support for google.protobuf.wrappers on top of primitive types */ // StringValue well-known type support as wrapper around string type -func StringValue(val string) (*wrappers.StringValue, error) { - return &wrappers.StringValue{Value: val}, nil +func StringValue(val string) (*wrapperspb.StringValue, error) { + return &wrapperspb.StringValue{Value: val}, nil } // FloatValue well-known type support as wrapper around float32 type -func FloatValue(val string) (*wrappers.FloatValue, error) { +func FloatValue(val string) (*wrapperspb.FloatValue, error) { parsedVal, err := Float32(val) - return &wrappers.FloatValue{Value: parsedVal}, err + return &wrapperspb.FloatValue{Value: parsedVal}, err } // DoubleValue well-known type support as wrapper around float64 type -func DoubleValue(val string) (*wrappers.DoubleValue, error) { +func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { parsedVal, err := Float64(val) - return &wrappers.DoubleValue{Value: parsedVal}, err + return &wrapperspb.DoubleValue{Value: parsedVal}, err } // BoolValue well-known type support as wrapper around bool type -func BoolValue(val string) (*wrappers.BoolValue, error) { +func BoolValue(val string) (*wrapperspb.BoolValue, error) { parsedVal, err := Bool(val) - return &wrappers.BoolValue{Value: parsedVal}, err + return &wrapperspb.BoolValue{Value: parsedVal}, err } // Int32Value well-known type support as wrapper around int32 type -func Int32Value(val string) (*wrappers.Int32Value, error) { +func Int32Value(val string) (*wrapperspb.Int32Value, error) { parsedVal, err := Int32(val) - return &wrappers.Int32Value{Value: parsedVal}, err + return &wrapperspb.Int32Value{Value: parsedVal}, err } // UInt32Value well-known type support as wrapper around uint32 type -func UInt32Value(val string) (*wrappers.UInt32Value, error) { +func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { parsedVal, err := Uint32(val) - return &wrappers.UInt32Value{Value: parsedVal}, err + return &wrapperspb.UInt32Value{Value: parsedVal}, err } // Int64Value well-known type support as wrapper around int64 type -func Int64Value(val string) (*wrappers.Int64Value, error) { +func Int64Value(val string) (*wrapperspb.Int64Value, error) { parsedVal, err := Int64(val) - return &wrappers.Int64Value{Value: parsedVal}, err + return &wrapperspb.Int64Value{Value: parsedVal}, err } // UInt64Value well-known type support as wrapper around uint64 type -func UInt64Value(val string) (*wrappers.UInt64Value, error) { +func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { parsedVal, err := Uint64(val) - return &wrappers.UInt64Value{Value: parsedVal}, err + return &wrapperspb.UInt64Value{Value: parsedVal}, err } // BytesValue well-known type support as wrapper around bytes[] type -func BytesValue(val string) (*wrappers.BytesValue, error) { +func BytesValue(val string) (*wrapperspb.BytesValue, error) { parsedVal, err := Bytes(val) - return &wrappers.BytesValue{Value: parsedVal}, err + return 
&wrapperspb.BytesValue{Value: parsedVal}, err } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 0000000000..ec1c419339 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,181 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// DefaultHTTPErrorHandler is the default error handler. 
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + pb := s.Proto() + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + contentType := marshaler.ContentType(pb) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(pb) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors. 
+// By default http error codes mapped on the following error codes: +// +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 0000000000..82ab3d277b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,165 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, fmt.Errorf("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." + newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." 
+ string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." + k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go similarity index 74% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index e6e8f286e1..72080c50d7 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -2,19 +2,19 @@ package runtime import ( "context" - "errors" "fmt" "io" "net/http" "net/textproto" + "strings" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) -var errEmptyResponse = errors.New("empty response") - // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) @@ -33,7 +33,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal handleForwardResponseServerMetadata(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { HTTPError(ctx, mux, marshaler, w, req, err) return @@ -53,18 +52,25 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } if err != nil { - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(resp)) + } + var buf []byte + httpBody, isHTTPBody := resp.(*httpbody.HttpBody) switch { case resp == nil: - buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse))) + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() default: result := map[string]interface{}{"result": resp} if rb, ok := resp.(responseBody); ok { @@ -76,7 +82,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal if err != nil { grpclog.Infof("Failed to marshal response chunk: %v", err) - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if _, err = w.Write(buf); err != nil { @@ -132,15 +138,22 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha } handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. 
+ doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + handleForwardResponseTrailerHeader(w, md) - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - contentType = typeMarshaler.ContentTypeFromMessage(resp) - } + contentType := marshaler.ContentType(resp) w.Header().Set("Content-Type", contentType) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { @@ -164,7 +177,14 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha grpclog.Infof("Failed to write response: %v", err) } - handleForwardResponseTrailer(w, md) + if doForwardTrailers { + handleForwardResponseTrailer(w, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") } func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { @@ -180,12 +200,14 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re return nil } -func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { - serr := streamError(ctx, mux.streamErrorHandler, err) +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) if !wroteHeader { - w.WriteHeader(int(serr.HttpCode)) + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) } - buf, merr := marshaler.Marshal(errorChunk(serr)) + buf, merr := marshaler.Marshal(msg) if merr != nil { grpclog.Infof("Failed to marshal an error: %v", merr) return @@ -194,19 +216,12 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar grpclog.Infof("Failed to notify error to client: %v", werr) return } -} - -// streamError returns the payload for the final message in a response stream -// that represents the given err. -func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { - serr := errHandler(ctx, err) - if serr != nil { - return serr + if _, derr := w.Write(delimiter); derr != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return } - // TODO: log about misbehaving stream error handler? 
- return DefaultHTTPStreamErrorHandler(ctx, err) } -func errorChunk(err *StreamError) map[string]proto.Message { - return map[string]proto.Message{"error": (*internal.StreamError)(err)} +func errorChunk(st *status.Status) map[string]proto.Message { + return map[string]proto.Message{"error": st.Proto()} } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go similarity index 54% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go index 525b0338c7..b86135c889 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -4,13 +4,6 @@ import ( "google.golang.org/genproto/googleapis/api/httpbody" ) -// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler -func SetHTTPBodyMarshaler(serveMux *ServeMux) { - serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ - Marshaler: &JSONPb{OrigName: true}, - } -} - // HTTPBodyMarshaler is a Marshaler which supports marshaling of a // google.api.HttpBody message as the full response body if it is // the actual message used as the response. If not, then this will @@ -19,18 +12,14 @@ type HTTPBodyMarshaler struct { Marshaler } -// ContentType implementation to keep backwards compatibility with marshal interface -func (h *HTTPBodyMarshaler) ContentType() string { - return h.ContentTypeFromMessage(nil) -} - -// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns -// its specified content type otherwise fall back to the default Marshaler. -func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshalers +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { if httpBody, ok := v.(*httpbody.HttpBody); ok { return httpBody.GetContentType() } - return h.Marshaler.ContentType() + return h.Marshaler.ContentType(v) } // Marshal marshals "v" by returning the body bytes if v is a diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go similarity index 95% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index f9d3a585a4..d6aa825783 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -15,7 +15,7 @@ import ( type JSONBuiltin struct{} // ContentType always Returns "application/json". 
-func (*JSONBuiltin) ContentType() string { +func (*JSONBuiltin) ContentType(_ interface{}) string { return "application/json" } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go similarity index 57% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index f0de351b21..524ea057cc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -6,21 +6,25 @@ import ( "fmt" "io" "reflect" + "strconv" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. // // The NewDecoder method returns a DecoderWrapper, so the underlying // *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler +type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} // ContentType always returns "application/json". -func (*JSONPb) ContentType() string { +func (*JSONPb) ContentType(_ interface{}) string { return "application/json" } @@ -47,7 +51,13 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { _, err = w.Write(buf) return err } - return (*jsonpb.Marshaler)(j).Marshal(w, p) + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err } var ( @@ -56,8 +66,8 @@ var ( ) // marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, // i.e. primitive types, enums; pointers to primitives or enums; maps from // integer/string types to primitives/enums/pointers to messages. 
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { @@ -74,7 +84,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { if rv.Kind() == reflect.Slice { if rv.IsNil() { - if j.EmitDefaults { + if j.EmitUnpopulated { return []byte("[]"), nil } return []byte("null"), nil @@ -93,7 +103,37 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { return nil, err } } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { return nil, err } } @@ -120,7 +160,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } return json.Marshal(m) } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { return json.Marshal(enum.String()) } return json.Marshal(rv.Interface()) @@ -128,25 +168,29 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { // Unmarshal unmarshals JSON "data" into "v" func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) + return unmarshalJSONPb(data, j.UnmarshalOptions, v) } // NewDecoder returns a Decoder which reads JSON stream from "r". func (j *JSONPb) NewDecoder(r io.Reader) Decoder { d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } } // DecoderWrapper is a wrapper around a *json.Decoder that adds // support for protos to the Decode method. type DecoderWrapper struct { *json.Decoder + protojson.UnmarshalOptions } // Decode wraps the embedded decoder's Decode method to support // protos using a jsonpb.Unmarshaler. func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) } // NewEncoder returns an Encoder which writes JSON stream into "w". 
@@ -162,21 +206,28 @@ func (j *JSONPb) NewEncoder(w io.Writer) Encoder { }) } -func unmarshalJSONPb(data []byte, v interface{}) error { +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) + return decodeJSONPb(d, unmarshaler, v) } -func decodeJSONPb(d *json.Decoder, v interface{}) error { +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { p, ok := v.(proto.Message) if !ok { - return decodeNonProtoField(d, v) + return decodeNonProtoField(d, unmarshaler, v) } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, p) + + // Decode into bytes for marshalling + var b json.RawMessage + err := d.Decode(&b) + if err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) } -func decodeNonProtoField(d *json.Decoder, v interface{}) error { +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { return fmt.Errorf("%T is not a pointer", v) @@ -186,8 +237,14 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error { rv.Set(reflect.New(rv.Type().Elem())) } if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + // Decode into bytes for marshalling + var b json.RawMessage + err := d.Decode(&b) + if err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) } rv = rv.Elem() } @@ -211,24 +268,56 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error { } bk := result[0] bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { return err } rv.SetMapIndex(bk, bv.Elem()) } return nil } + if rv.Kind() == reflect.Slice { + if rv.Type().Elem().Kind() == reflect.Uint8 { + var sl []byte + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.SetBytes(sl) + } + return nil + } + + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } if _, ok := rv.Interface().(protoEnum); ok { var repr interface{} if err := d.Decode(&repr); err != nil { return err } - switch repr.(type) { + switch v := repr.(type) { case string: // TODO(yugui) Should use proto.StructProperties? return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type())) return nil default: return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) @@ -242,6 +331,8 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } +var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() + var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() // Delimiter for newline encoded JSON streams. 
@@ -249,14 +340,16 @@ func (j *JSONPb) Delimiter() []byte { return []byte("\n") } -// allowUnknownFields helps not to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -var allowUnknownFields = true - -// DisallowUnknownFields enables option in decoder (unmarshaller) to -// return an error when it finds an unknown field. This function must be -// called before using the JSON marshaller. -func DisallowUnknownFields() { - allowUnknownFields = false -} +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go similarity index 93% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go index f65d1a2676..007f8f1a2c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go @@ -4,15 +4,16 @@ import ( "io" "errors" - "github.com/golang/protobuf/proto" "io/ioutil" + + "google.golang.org/protobuf/proto" ) // ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes type ProtoMarshaller struct{} // ContentType always returns "application/octet-stream". -func (*ProtoMarshaller) ContentType() string { +func (*ProtoMarshaller) ContentType(_ interface{}) string { return "application/octet-stream" } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go similarity index 80% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 4615329421..2c0d25ff49 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -16,14 +16,9 @@ type Marshaler interface { // NewEncoder returns an Encoder which writes bytes sequence into "w". NewEncoder(w io.Writer) Encoder // ContentType returns the Content-Type which this marshaler is responsible for. - ContentType() string -} - -// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called -// to set the Content-Type header on the response -type contentTypeMarshaler interface { - // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message - ContentTypeFromMessage(v interface{}) string + // The parameter describes the type which is being marshalled, which can sometimes + // affect the content type returned. 
+ ContentType(v interface{}) string } // Decoder decodes a byte sequence diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go similarity index 91% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 8dd5c24db4..a714de0240 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -6,6 +6,7 @@ import ( "net/http" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" ) // MIMEWildcard is the fallback MIME type used for requests which do not match @@ -16,7 +17,16 @@ var ( acceptHeader = http.CanonicalHeaderKey("Accept") contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - defaultMarshaler = &JSONPb{OrigName: true} + defaultMarshaler = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, + }, + UnmarshalOptions: protojson.UnmarshalOptions{ + DiscardUnknown: true, + }, + }, + } ) // MarshalerForRequest returns the inbound/outbound marshalers for this request. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go new file mode 100644 index 0000000000..8ba71a9e05 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -0,0 +1,442 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/textproto" + "regexp" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// UnescapingMode defines the behavior of ServeMux when unescaping path parameters. +type UnescapingMode int + +const ( + // UnescapingModeLegacy is the default V2 behavior, which escapes the entire + // path string before doing any routing. + UnescapingModeLegacy UnescapingMode = iota + + // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570 + // reserved characters. + UnescapingModeAllExceptReserved + + // UnescapingModeAllExceptSlash unescapes URL path parameters except path + // separators, which will be left as "%2F". + UnescapingModeAllExceptSlash + + // UnescapingModeAllCharacters unescapes all URL path parameters. + UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. + // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's + // reference implementation + UnescapingModeDefault = UnescapingModeLegacy +) + +var ( + encodedPathSplitter = regexp.MustCompile("(/|%2F)") +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. 
+ handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + errorHandler ErrorHandlerFunc + streamErrorHandler StreamErrorHandlerFunc + routingErrorHandler RoutingErrorHandlerFunc + disablePathLengthFallback bool + unescapingMode UnescapingMode +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// WithEscapingType sets the escaping type. See the definitions of UnescapingMode +// for more information. +func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.unescapingMode = mode + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be +// done with careful consideration. +func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + for _, header := range fn.matchedMalformedHeaders() { + grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. 
See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) + } + + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. +func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string { + if fn == nil { + return nil + } + headers := make([]string, 0) + for header := range malformedHTTPHeaders { + out, accept := fn(header) + if accept && isMalformedHTTPHeader(out) { + headers = append(headers, out) + } + } + return headers +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler. +// +// This can be used to configure a custom error response. +func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.errorHandler = fn + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ErrorHandler will be invoked. However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. +func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors. +// +// Method called for errors which can happen before gRPC route selected or executed. +// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest +func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.routingErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. 
+// When called the handler will forward the request to the upstream grpc service health check (defined in the +// gRPC Health Checking Protocol). +// +// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how +// to setup the protocol in the grpc server. +// +// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. +func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { + return func(s *ServeMux) { + // error can be ignored since pattern is definitely valid + _ = s.HandlePath( + http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, + ) { + _, outboundMarshaler := MarshalerForRequest(s, r) + + resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + w.Header().Set("Content-Type", "application/json") + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + var err error + switch resp.GetStatus() { + case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: + err = status.Error(codes.Unavailable, resp.String()) + case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: + err = status.Error(codes.NotFound, resp.String()) + } + + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + _ = outboundMarshaler.NewEncoder(w).Encode(resp) + }) + } +} + +// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. +// +// See WithHealthEndpointAt for the general implementation. +func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { + return WithHealthEndpointAt(healthCheckClient, "/healthz") +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. 
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ +func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { + compiler, err := httprule.Parse(pathPattern) + if err != nil { + return fmt.Errorf("parsing path pattern: %w", err) + } + tp := compiler.Compile() + pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb) + if err != nil { + return fmt.Errorf("creating new pattern: %w", err) + } + s.Handle(meth, pattern, h) + return nil +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. +func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest) + return + } + + // TODO(v3): remove UnescapingModeLegacy + if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" { + path = r.URL.RawPath + } + + var components []string + // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" + // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the + // path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default + // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved + if s.unescapingMode == UnescapingModeAllCharacters { + components = encodedPathSplitter.Split(path[1:], -1) + } else { + components = strings.Split(path[1:], "/") + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + } + + // Verb out here is to memoize for the fallback case below + var verb string + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last + // component that matches a colon plus the verb. This allows us to + // handle some cases that otherwise can't be correctly handled by the + // former LastIndex case, such as when the verb literal itself contains + // a colon. This should work for all cases that have run through the + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. 
+ patVerb := h.pat.Verb() + l := len(components) + lastComponent := components[l-1] + var idx int = -1 + if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) { + idx = len(lastComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } + if idx > 0 { + components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is NotImplemented or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + h.h(w, r, pathParams) + return + } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed) + return + } + } + + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. +func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go similarity index 57% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 09053695da..df7cb81426 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -3,9 +3,10 @@ package runtime import ( "errors" "fmt" + "strconv" "strings" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc/grpclog" ) @@ -14,14 +15,23 @@ var ( ErrNotMatch = errors.New("not match to the path pattern") // ErrInvalidPattern indicates that the given definition of Pattern is not valid. 
ErrInvalidPattern = errors.New("invalid pattern") + // ErrMalformedSequence indicates that an escape sequence was malformed. + ErrMalformedSequence = errors.New("malformed escape sequence") ) +type MalformedSequenceError string + +func (e MalformedSequenceError) Error() string { + return "malformed path escape " + strconv.Quote(string(e)) +} + type op struct { code utilities.OpCode operand int } -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +// Pattern is a template pattern of http request paths defined in +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto type Pattern struct { // ops is a list of operations ops []op @@ -35,31 +45,14 @@ type Pattern struct { tailLen int // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. verb string - // assumeColonVerb indicates whether a path suffix after a final - // colon may only be interpreted as a verb. - assumeColonVerb bool } -type patternOptions struct { - assumeColonVerb bool -} - -// PatternOpt is an option for creating Patterns. -type PatternOpt func(*patternOptions) - // NewPattern returns a new Pattern from the given definition values. // "ops" is a sequence of op codes. "pool" is a constant pool. // "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. // "version" must be 1 for now. // It returns an error if the given definition is invalid. -func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { - options := patternOptions{ - assumeColonVerb: true, - } - for _, o := range opts { - o(&options) - } - +func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { grpclog.Infof("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern @@ -111,7 +104,7 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt } stack -= op.operand if stack < 0 { - grpclog.Print("stack underflow") + grpclog.Info("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ @@ -139,13 +132,12 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt typedOps = append(typedOps, op) } return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - assumeColonVerb: options.assumeColonVerb, + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, }, nil } @@ -157,12 +149,13 @@ func MustPattern(p Pattern, err error) Pattern { return p } -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. -func (p Pattern) Match(components []string, verb string) (map[string]string, error) { +// MatchAndEscape examines components to determine if they match to a Pattern. +// MatchAndEscape will return an error if no Patterns matched or if a pattern +// matched but contained malformed escape sequences. If successful, the function +// returns a mapping from field paths to their captured values. 
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) { if p.verb != verb { - if p.assumeColonVerb || p.verb != "" { + if p.verb != "" { return nil, ErrNotMatch } if len(components) == 0 { @@ -171,7 +164,6 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err components = append([]string{}, components...) components[len(components)-1] += ":" + verb } - verb = "" } var pos int @@ -179,6 +171,8 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err captured := make([]string, len(p.vars)) l := len(components) for _, op := range p.ops { + var err error + switch op.code { case utilities.OpNop: continue @@ -191,6 +185,10 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err if lit := p.pool[op.operand]; c != lit { return nil, ErrNotMatch } + } else if op.code == utilities.OpPush { + if c, err = unescape(c, unescapingMode, false); err != nil { + return nil, err + } } stack = append(stack, c) pos++ @@ -200,7 +198,11 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err return nil, ErrNotMatch } end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) + c := strings.Join(components[pos:end], "/") + if c, err = unescape(c, unescapingMode, true); err != nil { + return nil, err + } + stack = append(stack, c) pos = end case utilities.OpConcatN: n := op.operand @@ -222,6 +224,16 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err return bindings, nil } +// MatchAndEscape examines components to determine if they match to a Pattern. +// It will never perform per-component unescaping (see: UnescapingModeLegacy). +// MatchAndEscape will return an error if no Patterns matched. If successful, +// the function returns a mapping from field paths to their captured values. +// +// Deprecated: Use MatchAndEscape. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + return p.MatchAndEscape(components, verb, UnescapingModeDefault) +} + // Verb returns the verb part of the Pattern. func (p Pattern) Verb() string { return p.verb } @@ -253,10 +265,119 @@ func (p Pattern) String() string { return "/" + segs } -// AssumeColonVerbOpt indicates whether a path suffix after a final -// colon may only be interpreted as a verb. -func AssumeColonVerbOpt(val bool) PatternOpt { - return PatternOpt(func(o *patternOptions) { - o.assumeColonVerb = val - }) +/* + * The following code is adopted and modified from Go's standard library + * and carries the attached license. + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. 
+ */ + +// ishex returns whether or not the given byte is a valid hex character +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func isRFC6570Reserved(c byte) bool { + switch c { + case '!', '#', '$', '&', '\'', '(', ')', '*', + '+', ',', '/', ':', ';', '=', '?', '@', '[', ']': + return true + default: + return false + } +} + +// unhex converts a hex point to the bit representation +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +// shouldUnescapeWithMode returns true if the character is escapable with the +// given mode +func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool { + switch mode { + case UnescapingModeAllExceptReserved: + if isRFC6570Reserved(c) { + return false + } + case UnescapingModeAllExceptSlash: + if c == '/' { + return false + } + case UnescapingModeAllCharacters: + return true + } + return true +} + +// unescape unescapes a path string using the provided mode +func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) { + // TODO(v3): remove UnescapingModeLegacy + if mode == UnescapingModeLegacy { + return s, nil + } + + if !multisegment { + mode = UnescapingModeAllCharacters + } + + // Count %, check that they're well-formed. + n := 0 + for i := 0; i < len(s); { + if s[i] == '%' { + n++ + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + s = s[i:] + if len(s) > 3 { + s = s[:3] + } + + return "", MalformedSequenceError(s) + } + i += 3 + } else { + i++ + } + } + + if n == 0 { + return s, nil + } + + var t strings.Builder + t.Grow(len(s)) + for i := 0; i < len(s); i++ { + switch s[i] { + case '%': + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if shouldUnescapeWithMode(c, mode) { + t.WriteByte(c) + i += 2 + continue + } + fallthrough + default: + t.WriteByte(s[i]) + } + } + + return t.String(), nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go similarity index 98% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index a3151e2a55..d549407f20 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -1,7 +1,7 @@ package runtime import ( - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // StringP returns a pointer to a string whose pointee is same as the given string value. 
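For context on the vendored v2 runtime above, here is a minimal, illustrative sketch (not part of the patch, and not how the daemon itself wires things up) of constructing a grpc-gateway v2 ServeMux with the protojson-backed JSONPb marshaler and the DefaultHTTPErrorHandler added in this vendor bump. WithMarshalerOption comes from the marshaler registry, which is only partially shown in this diff.

package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// JSONPb in v2 embeds protojson.{Marshal,Unmarshal}Options instead of the
	// old jsonpb.Marshaler fields (OrigName, EmitDefaults, EnumsAsInts, ...).
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			MarshalOptions:   protojson.MarshalOptions{EmitUnpopulated: true},
			UnmarshalOptions: protojson.UnmarshalOptions{DiscardUnknown: true},
		}),
		// Error responses are rendered as a marshaled Status message, with the
		// HTTP status derived via HTTPStatusFromCode (or an HTTPStatusError).
		runtime.WithErrorHandler(runtime.DefaultHTTPErrorHandler),
	)

	// Generated Register*HandlerFromEndpoint calls would be made here in a
	// real gateway; none are included because no service stubs are vendored.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
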
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go new file mode 100644 index 0000000000..65d0da4716 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -0,0 +1,342 @@ +package runtime + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +var currentQueryParser QueryParameterParser = &DefaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +// DefaultQueryParser is a QueryParameterParser which implements the default +// query parameters parsing behavior. +// +// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context. +type DefaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. 
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil { + if f := msgValue.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + switch { + case errors.Is(err, protoregistry.NotFound): + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + case err != nil: + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + v = 
enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)) + if v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.DoubleValue{Value: v} + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.FloatValue{Value: float32(v)} + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.Int64Value{Value: v} + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.Int32Value{Value: int32(v)} + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.UInt64Value{Value: v} + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.UInt32Value{Value: uint32(v)} + case "google.protobuf.BoolValue": + v, err := 
strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.BoolValue{Value: v} + case "google.protobuf.StringValue": + msg = &wrapperspb.StringValue{Value: value} + case "google.protobuf.BytesValue": + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.BytesValue{Value: v} + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + case "google.protobuf.Value": + var v structpb.Value + err := protojson.Unmarshal([]byte(value), &v) + if err != nil { + return protoreflect.Value{}, err + } + msg = &v + case "google.protobuf.Struct": + var v structpb.Struct + err := protojson.Unmarshal([]byte(value), &v) + if err != nil { + return protoreflect.Value{}, err + } + msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 0000000000..b894094657 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go new file mode 100644 index 0000000000..d224ab776c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -0,0 +1,33 @@ +package utilities + +import ( + "flag" + "strings" +) + +// flagInterface is an cut down interface to `flag` +type flagInterface interface { + Var(value flag.Value, name string, usage string) +} + +// StringArrayFlag defines a flag with the specified 
name and usage string. +// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag. +func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags { + value := &StringArrayFlags{} + f.Var(value, name, usage) + return value +} + +// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var` +type StringArrayFlags []string + +// String returns a string representation of `StringArrayFlags` +func (i *StringArrayFlags) String() string { + return strings.Join(*i, ",") +} + +// Set appends a value to `StringArrayFlags` +func (i *StringArrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go similarity index 98% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go index c2b7b30dd9..af3b703d50 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go @@ -145,10 +145,7 @@ func (l byLex) Less(i, j int) bool { return false } } - if k < len(sj) { - return true - } - return false + return k < len(sj) } // HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go similarity index 58% rename from vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index aaeb19e30d..2ae8620fb6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -12,17 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( - "context" - - "google.golang.org/grpc/metadata" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/metric/instrument" "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -33,10 +32,20 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) +// Filter is a predicate used to determine whether a given request in +// interceptor info should be traced. A Filter must return true if +// the request should be traced. +type Filter func(*InterceptorInfo) bool + // config is a group of options for this instrumentation. type config struct { + Filter Filter Propagators propagation.TextMapPropagator TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + + meter metric.Meter + rpcServerDuration instrument.Int64Histogram } // Option applies an option value for a config. 
@@ -49,10 +58,22 @@ func newConfig(opts []Option) *config { c := &config{ Propagators: otel.GetTextMapPropagator(), TracerProvider: otel.GetTracerProvider(), + MeterProvider: global.MeterProvider(), } for _, o := range opts { o.apply(c) } + + c.meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(SemVersion()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + var err error + if c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration", instrument.WithUnit("ms")); err != nil { + otel.Handle(err) + } + return c } @@ -78,57 +99,37 @@ func (o tracerProviderOption) apply(c *config) { } } +// WithInterceptorFilter returns an Option to use the request filter. +func WithInterceptorFilter(f Filter) Option { + return interceptorFilterOption{f: f} +} + +type interceptorFilterOption struct { + f Filter +} + +func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.Filter = o.f + } +} + // WithTracerProvider returns an Option to use the TracerProvider when // creating a Tracer. func WithTracerProvider(tp trace.TracerProvider) Option { return tracerProviderOption{tp: tp} } -type metadataSupplier struct { - metadata *metadata.MD -} +type meterProviderOption struct{ mp metric.MeterProvider } -// assert that metadataSupplier implements the TextMapCarrier interface -var _ propagation.TextMapCarrier = &metadataSupplier{} - -func (s *metadataSupplier) Get(key string) string { - values := s.metadata.Get(key) - if len(values) == 0 { - return "" +func (o meterProviderOption) apply(c *config) { + if o.mp != nil { + c.MeterProvider = o.mp } - return values[0] } -func (s *metadataSupplier) Set(key string, value string) { - s.metadata.Set(key, value) -} - -func (s *metadataSupplier) Keys() []string { - out := make([]string, 0, len(*s.metadata)) - for key := range *s.metadata { - out = append(out, key) - } - return out -} - -// Inject injects correlation context and span context into the gRPC -// metadata object. This function is meant to be used on outgoing -// requests. -func Inject(ctx context.Context, metadata *metadata.MD, opts ...Option) { - c := newConfig(opts) - c.Propagators.Inject(ctx, &metadataSupplier{ - metadata: metadata, - }) -} - -// Extract returns the correlation context and span context that -// another service encoded in the gRPC metadata object with Inject. -// This function is meant to be used on incoming requests. -func Extract(ctx context.Context, metadata *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { - c := newConfig(opts) - ctx = c.Propagators.Extract(ctx, &metadataSupplier{ - metadata: metadata, - }) - - return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +// WithMeterProvider returns an Option to use the MeterProvider when +// creating a Meter. If this option is not provide the global MeterProvider will be used. +func WithMeterProvider(mp metric.MeterProvider) Option { + return meterProviderOption{mp: mp} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index b1d9f643d8..b74d558e37 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // gRPC tracing middleware // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md @@ -20,41 +20,36 @@ import ( "context" "io" "net" - - "github.com/golang/protobuf/proto" // nolint:staticcheck + "strconv" + "time" "google.golang.org/grpc" grpc_codes "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) type messageType attribute.KeyValue // Event adds an event of the messageType to the span associated with the -// passed context with id and size (if message is a proto message). -func (m messageType) Event(ctx context.Context, id int, message interface{}) { +// passed context with a message id. +func (m messageType) Event(ctx context.Context, id int, _ interface{}) { span := trace.SpanFromContext(ctx) - if p, ok := message.(proto.Message); ok { - span.AddEvent("message", trace.WithAttributes( - attribute.KeyValue(m), - RPCMessageIDKey.Int(id), - RPCMessageUncompressedSizeKey.Int(proto.Size(p)), - )) - } else { - span.AddEvent("message", trace.WithAttributes( - attribute.KeyValue(m), - RPCMessageIDKey.Int(id), - )) + if !span.IsRecording() { + return } + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + )) } var ( @@ -65,6 +60,12 @@ var ( // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable // for use in a grpc.Dial call. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, method string, @@ -73,13 +74,13 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { invoker grpc.UnaryInvoker, callOpts ...grpc.CallOption, ) error { - requestMetadata, _ := metadata.FromOutgoingContext(ctx) - metadataCopy := requestMetadata.Copy() - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + i := &InterceptorInfo{ + Method: method, + Type: UnaryClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return invoker(ctx, method, req, reply, cc, callOpts...) + } name, attr := spanInfo(method, cc.Target()) var span trace.Span @@ -91,8 +92,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { ) defer span.End() - Inject(ctx, &metadataCopy, opts...) - ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + ctx = inject(ctx, cfg.Propagators) messageSent.Event(ctx, 1, req) @@ -235,6 +235,12 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable // for use in a grpc.Dial call. 
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, desc *grpc.StreamDesc, @@ -243,13 +249,13 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { streamer grpc.Streamer, callOpts ...grpc.CallOption, ) (grpc.ClientStream, error) { - requestMetadata, _ := metadata.FromOutgoingContext(ctx) - metadataCopy := requestMetadata.Copy() - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + i := &InterceptorInfo{ + Method: method, + Type: StreamClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return streamer(ctx, desc, cc, method, callOpts...) + } name, attr := spanInfo(method, cc.Target()) var span trace.Span @@ -260,8 +266,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { trace.WithAttributes(attr...), ) - Inject(ctx, &metadataCopy, opts...) - ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + ctx = inject(ctx, cfg.Propagators) s, err := streamer(ctx, desc, cc, method, callOpts...) if err != nil { @@ -294,26 +299,31 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { // UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable // for use in a grpc.NewServer call. func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (interface{}, error) { - requestMetadata, _ := metadata.FromIncomingContext(ctx) - metadataCopy := requestMetadata.Copy() + i := &InterceptorInfo{ + UnaryServerInfo: info, + Type: UnaryServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(ctx, req) + } - bags, spanCtx := Extract(ctx, &metadataCopy, opts...) - ctx = baggage.ContextWithBaggage(ctx, bags) - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + ctx = extract(ctx, cfg.Propagators) name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, spanCtx), + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes(attr...), @@ -322,13 +332,22 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { messageReceived.Event(ctx, 1, req) + var statusCode grpc_codes.Code + defer func(t time.Time) { + elapsedTime := time.Since(t) / time.Millisecond + attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode))) + cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), attr...) 
+ }(time.Now()) + resp, err := handler(ctx, req) if err != nil { s, _ := status.FromError(err) + statusCode = s.Code() span.SetStatus(codes.Error, s.Message()) span.SetAttributes(statusCodeAttr(s.Code())) messageSent.Event(ctx, 1, s.Proto()) } else { + statusCode = grpc_codes.OK span.SetAttributes(statusCodeAttr(grpc_codes.OK)) messageSent.Event(ctx, 1, resp) } @@ -381,6 +400,12 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { // StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable // for use in a grpc.NewServer call. func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( srv interface{}, ss grpc.ServerStream, @@ -388,21 +413,19 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { handler grpc.StreamHandler, ) error { ctx := ss.Context() + i := &InterceptorInfo{ + StreamServerInfo: info, + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(srv, wrapServerStream(ctx, ss)) + } - requestMetadata, _ := metadata.FromIncomingContext(ctx) - metadataCopy := requestMetadata.Copy() - - bags, spanCtx := Extract(ctx, &metadataCopy, opts...) - ctx = baggage.ContextWithBaggage(ctx, bags) - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + ctx = extract(ctx, cfg.Propagators) name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, spanCtx), + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes(attr...), @@ -410,7 +433,6 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { defer span.End() err := handler(srv, wrapServerStream(ctx, ss)) - if err != nil { s, _ := status.FromError(err) span.SetStatus(codes.Error, s.Message()) @@ -435,7 +457,7 @@ func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) { // peerAttr returns attributes about the peer address. func peerAttr(addr string) []attribute.KeyValue { - host, port, err := net.SplitHostPort(addr) + host, p, err := net.SplitHostPort(addr) if err != nil { return []attribute.KeyValue(nil) } @@ -443,11 +465,25 @@ func peerAttr(addr string) []attribute.KeyValue { if host == "" { host = "127.0.0.1" } - - return []attribute.KeyValue{ - semconv.NetPeerIPKey.String(host), - semconv.NetPeerPortKey.String(port), + port, err := strconv.Atoi(p) + if err != nil { + return []attribute.KeyValue(nil) } + + var attr []attribute.KeyValue + if ip := net.ParseIP(host); ip != nil { + attr = []attribute.KeyValue{ + semconv.NetSockPeerAddr(host), + semconv.NetSockPeerPort(port), + } + } else { + attr = []attribute.KeyValue{ + semconv.NetPeerName(host), + semconv.NetPeerPort(port), + } + } + + return attr } // peerFromCtx returns a peer address from a context, if one exists. @@ -459,7 +495,7 @@ func peerFromCtx(ctx context.Context) string { return p.Addr.String() } -// statusCodeAttr returns status code attribute based on given gRPC code +// statusCodeAttr returns status code attribute based on given gRPC code. 
func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { return GRPCStatusCodeKey.Int64(int64(c)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go new file mode 100644 index 0000000000..f6116946bf --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc" +) + +// InterceptorType is the flag to define which gRPC interceptor +// the InterceptorInfo object is. +type InterceptorType uint8 + +const ( + // UndefinedInterceptor is the type for the interceptor information that is not + // well initialized or categorized to other types. + UndefinedInterceptor InterceptorType = iota + // UnaryClient is the type for grpc.UnaryClient interceptor. + UnaryClient + // StreamClient is the type for grpc.StreamClient interceptor. + StreamClient + // UnaryServer is the type for grpc.UnaryServer interceptor. + UnaryServer + // StreamServer is the type for grpc.StreamServer interceptor. + StreamServer +) + +// InterceptorInfo is the union of some arguments to four types of +// gRPC interceptors. +type InterceptorInfo struct { + // Method is method name registered to UnaryClient and StreamClient + Method string + // UnaryServerInfo is the metadata for UnaryServer + UnaryServerInfo *grpc.UnaryServerInfo + // StreamServerInfo if the metadata for StreamServer + StreamServerInfo *grpc.StreamServerInfo + // Type is the type for interceptor + Type InterceptorType +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index 56a682102b..c40f87c4f9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package internal +package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) // ParseFullMethod returns a span name following the OpenTelemetry semantic @@ -34,10 +34,10 @@ func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { var attrs []attribute.KeyValue if service := parts[0]; service != "" { - attrs = append(attrs, semconv.RPCServiceKey.String(service)) + attrs = append(attrs, semconv.RPCService(service)) } if method := parts[1]; method != "" { - attrs = append(attrs, semconv.RPCMethodKey.String(method)) + attrs = append(attrs, semconv.RPCMethod(method)) } return name, attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go new file mode 100644 index 0000000000..d91c6df237 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface. +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +// Deprecated: Unnecessary public func. +func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { + c := newConfig(opts) + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: md, + }) +} + +func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + propagators.Inject(ctx, &metadataSupplier{ + metadata: &md, + }) + return metadata.NewOutgoingContext(ctx, md) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. 
+// This function is meant to be used on incoming requests. +// Deprecated: Unnecessary public func. +func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts) + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: md, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + return propagators.Extract(ctx, &metadataSupplier{ + metadata: &md, + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index 73f14a458e..b65fab308f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) // Semantic conventions for attribute keys for gRPC. @@ -41,7 +41,7 @@ const ( // Semantic conventions for common RPC attributes. var ( // Semantic convention for gRPC as the remoting system. - RPCSystemGRPC = semconv.RPCSystemKey.String("grpc") + RPCSystemGRPC = semconv.RPCSystemGRPC // Semantic convention for a message named message. RPCNameMessage = RPCNameKey.String("message") diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index f7c0e09079..78cac03ed1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.29.0" + return "0.40.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go index 491061583e..dc29163dd6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package otelhttptrace +package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" import ( "context" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go index a9c20429e0..14fa2961d4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttptrace +package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" import ( "context" @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -83,7 +83,7 @@ func WithoutSubSpans() ClientTraceOption { // WithRedactedHeaders will be replaced by fixed '****' values for the header // names provided. These are in addition to the sensitive headers already // redacted by default: Authorization, WWW-Authenticate, Proxy-Authenticate -// Proxy-Authorization, Cookie, Set-Cookie +// Proxy-Authorization, Cookie, Set-Cookie. func WithRedactedHeaders(headers ...string) ClientTraceOption { return clientTraceOptionFunc(func(ct *clientTracer) { for _, header := range headers { @@ -272,7 +272,7 @@ func (ct *clientTracer) span(hook string) trace.Span { } func (ct *clientTracer) getConn(host string) { - ct.start("http.getconn", "http.getconn", semconv.HTTPHostKey.String(host)) + ct.start("http.getconn", "http.getconn", semconv.NetHostName(host)) } func (ct *clientTracer) gotConn(info httptrace.GotConnInfo) { @@ -297,7 +297,7 @@ func (ct *clientTracer) gotFirstResponseByte() { } func (ct *clientTracer) dnsStart(info httptrace.DNSStartInfo) { - ct.start("http.dns", "http.dns", semconv.HTTPHostKey.String(info.Host)) + ct.start("http.dns", "http.dns", semconv.NetHostName(info.Host)) } func (ct *clientTracer) dnsDone(info httptrace.DNSDoneInfo) { @@ -342,7 +342,7 @@ func (ct *clientTracer) wroteHeaderField(k string, v []string) { if _, ok := ct.redactedHeaders[k]; ok { value = "****" } - ct.root.SetAttributes(attribute.String("http."+k, value)) + ct.root.SetAttributes(attribute.String("http.request.header."+k, value)) } func (ct *clientTracer) wroteHeaders() { @@ -398,11 +398,11 @@ func sm2s(value map[string][]string) string { var buf strings.Builder for k, v := range value { if buf.Len() != 0 { - buf.WriteString(",") + _, _ = buf.WriteString(",") } - buf.WriteString(k) - buf.WriteString("=") - buf.WriteString(sliceToString(v)) + _, _ = buf.WriteString(k) + _, _ = buf.WriteString("=") + _, _ = buf.WriteString(sliceToString(v)) } return buf.String() } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go index 6fca43a3ba..9e243cfddd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go @@ -12,7 +12,7 @@ // See 
the License for the specific language governing permissions and // limitations under the License. -package otelhttptrace +package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" import ( "context" @@ -22,7 +22,9 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" + "go.opentelemetry.io/otel/semconv/v1.17.0/netconv" "go.opentelemetry.io/otel/trace" ) @@ -50,7 +52,7 @@ func newConfig(opts []Option) *config { return c } -// WithPropagators sets the propagators to use for Extraction and Injection +// WithPropagators sets the propagators to use for Extraction and Injection. func WithPropagators(props propagation.TextMapPropagator) Option { return optionFunc(func(c *config) { if props != nil { @@ -64,14 +66,16 @@ func Extract(ctx context.Context, req *http.Request, opts ...Option) ([]attribut c := newConfig(opts) ctx = c.propagators.Extract(ctx, propagation.HeaderCarrier(req.Header)) - attrs := append( - semconv.HTTPServerAttributesFromHTTPRequest("", "", req), - semconv.NetAttributesFromHTTPRequest("tcp", req)..., - ) - + attrs := append(httpconv.ServerRequest("", req), netconv.Transport("tcp")) + if req.ContentLength > 0 { + a := semconv.HTTPRequestContentLength(int(req.ContentLength)) + attrs = append(attrs, a) + } return attrs, baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) } +// Inject sets attributes, context entries, and span context from ctx into +// the request. func Inject(ctx context.Context, req *http.Request, opts ...Option) { c := newConfig(opts) c.propagators.Inject(ctx, propagation.HeaderCarrier(req.Header)) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go index 3641fbb6cc..da9dba1fb8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttptrace +package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" // Version is the current release version of the httptrace instrumentation. func Version() string { - return "0.29.0" + return "0.40.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 0816b9f5da..92b8cf73c9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "context" @@ -24,12 +24,12 @@ import ( // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. 
// Please be careful of intitialization order - for example, if you change -// the global propagator, the DefaultClient might still be using the old one +// the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. -func Get(ctx context.Context, url string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", url, nil) +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) if err != nil { return nil, err } @@ -37,8 +37,8 @@ func Get(ctx context.Context, url string) (resp *http.Response, err error) { } // Head is a convenient replacement for http.Head that adds a span around the request. -func Head(ctx context.Context, url string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil) +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) if err != nil { return nil, err } @@ -46,8 +46,8 @@ func Head(ctx context.Context, url string) (resp *http.Response, err error) { } // Post is a convenient replacement for http.Post that adds a span around the request. -func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", url, body) +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) if err != nil { return nil, err } @@ -56,6 +56,6 @@ func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *h } // PostForm is a convenient replacement for http.PostForm that adds a span around the request. -func PostForm(ctx context.Context, url string, data url.Values) (resp *http.Response, err error) { - return Post(ctx, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index dcc59449e4..728be09d0e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "net/http" @@ -29,7 +29,7 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics +// Server HTTP metrics. 
const ( RequestCount = "http.server.request_count" // Incoming request count total RequestContentLength = "http.server.request_content_length" // Incoming request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 0c18c11104..0c3e48d558 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "context" @@ -33,10 +33,13 @@ const ( // config represents the configuration options available for the http.Handler // and http.Transport types. type config struct { + ServerName string Tracer trace.Tracer Meter metric.Meter Propagators propagation.TextMapPropagator SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool ReadEvent bool WriteEvent bool Filters []Filter @@ -62,7 +65,7 @@ func (o optionFunc) apply(c *config) { func newConfig(opts ...Option) *config { c := &config{ Propagators: otel.GetTextMapPropagator(), - MeterProvider: global.GetMeterProvider(), + MeterProvider: global.MeterProvider(), } for _, opt := range opts { opt.apply(c) @@ -106,7 +109,18 @@ func WithMeterProvider(provider metric.MeterProvider) Option { // association instead of a link. func WithPublicEndpoint() Option { return optionFunc(func(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, trace.WithNewRoot()) + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionnally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn }) } @@ -142,7 +156,7 @@ func WithFilter(f Filter) Option { type event int -// Different types of events that can be recorded, see WithMessageEvents +// Different types of events that can be recorded, see WithMessageEvents. const ( ReadEvents event = iota WriteEvents @@ -153,10 +167,10 @@ const ( // end of the request. // // Valid events are: -// * ReadEvents: Record the number of bytes read after every http.Request.Body.Read -// using the ReadBytesKey -// * WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write -// using the WriteBytesKey +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey func WithMessageEvents(events ...event) Option { return optionFunc(func(c *config) { for _, e := range events { @@ -171,7 +185,7 @@ func WithMessageEvents(events ...event) Option { } // WithSpanNameFormatter takes a function that will be called on every -// request and the returned string will become the Span Name +// request and the returned string will become the Span Name. 
func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { return optionFunc(func(c *config) { c.SpanNameFormatter = f @@ -185,3 +199,11 @@ func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { c.ClientTrace = f }) } + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index dbba925dfa..d92aecc0a3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "io" @@ -24,8 +24,10 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" "go.opentelemetry.io/otel/trace" ) @@ -37,6 +39,7 @@ var _ http.Handler = &Handler{} // to the span using the attribute.Keys defined in this package. type Handler struct { operation string + server string handler http.Handler tracer trace.Tracer @@ -47,8 +50,10 @@ type Handler struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - counters map[string]metric.Int64Counter - valueRecorders map[string]metric.Float64Histogram + counters map[string]instrument.Int64Counter + valueRecorders map[string]instrument.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -84,6 +89,9 @@ func (h *Handler) configure(c *config) { h.writeEvent = c.WriteEvent h.filters = c.Filters h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName } func handleErr(err error) { @@ -93,16 +101,16 @@ func handleErr(err error) { } func (h *Handler) createMeasures() { - h.counters = make(map[string]metric.Int64Counter) - h.valueRecorders = make(map[string]metric.Float64Histogram) + h.counters = make(map[string]instrument.Int64Counter) + h.valueRecorders = make(map[string]instrument.Float64Histogram) - requestBytesCounter, err := h.meter.NewInt64Counter(RequestContentLength) + requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) handleErr(err) - responseBytesCounter, err := h.meter.NewInt64Counter(ResponseContentLength) + responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) handleErr(err) - serverLatencyMeasure, err := h.meter.NewFloat64Histogram(ServerLatency) + serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) handleErr(err) h.counters[RequestContentLength] = requestBytesCounter @@ -110,7 +118,7 @@ func (h *Handler) createMeasures() { h.valueRecorders[ServerLatency] = serverLatencyMeasure } -// ServeHTTP serves HTTP requests (http.Handler) +// ServeHTTP serves HTTP 
requests (http.Handler). func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { requestStartTime := time.Now() for _, f := range h.filters { @@ -121,11 +129,22 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - opts := append([]trace.SpanStartOption{ - trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...), - trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...), - trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...), - }, h.spanStartOptions...) // start with the configured options + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(httpconv.ServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } tracer := h.tracer @@ -137,7 +156,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -149,10 +167,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } var bw bodyWrapper - // if request body is nil we don't want to mutate the body as it will affect - // the identity of it in a unforeseeable way because we assert ReadCloser - // fullfills a certain interface and it is indeed nil. - if r.Body != nil { + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + if r.Body != nil && r.Body != http.NoBody { bw.ReadCloser = r.Body bw.record = readRecordFunc r.Body = &bw @@ -165,7 +183,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators} + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -191,7 +215,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) // Add metrics - attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...) + attributes := append(labeler.Get(), httpconv.ServerRequest(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } h.counters[RequestContentLength].Add(ctx, bw.read, attributes...) h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...) 
@@ -216,9 +243,10 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, attributes = append(attributes, WroteBytesKey.Int64(wrote)) } if statusCode > 0 { - attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(statusCode)) + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) } + span.SetStatus(httpconv.ServerStatus(statusCode)) + if werr != nil && werr != io.EOF { attributes = append(attributes, WriteErrorKey.String(werr.Error())) } @@ -230,7 +258,7 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, func WithRouteTag(route string, h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) - span.SetAttributes(semconv.HTTPRouteKey.String(route)) + span.SetAttributes(semconv.HTTPRoute(route)) h.ServeHTTP(w, r) }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 7b7d1c0ace..26a51a1805 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "context" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 121ad99b0a..9dda7e1a95 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "context" @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" "go.opentelemetry.io/otel/trace" ) @@ -46,7 +46,7 @@ var _ http.RoundTripper = &Transport{} // starts a span and injects the span context into the outbound request headers. // // If the provided http.RoundTripper is nil, http.DefaultTransport will be used -// as the base http.RoundTripper +// as the base http.RoundTripper. func NewTransport(base http.RoundTripper, opts ...Option) *Transport { if base == nil { base = http.DefaultTransport @@ -110,7 +110,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } r = r.WithContext(ctx) - span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...) + span.SetAttributes(httpconv.ClientRequest(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) @@ -121,8 +121,8 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } - span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...) 
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode)) + span.SetAttributes(httpconv.ClientResponse(res)...) + span.SetStatus(httpconv.ClientStatus(res.StatusCode)) res.Body = newWrappedBody(span, res.Body) return res, err @@ -186,5 +186,8 @@ func (wb *wrappedBody) Read(b []byte) (int, error) { func (wb *wrappedBody) Close() error { wb.span.End() - return wb.body.Close() + if wb.body != nil { + return wb.body.Close() + } + return nil } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 63402819ca..8a55855b9c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.29.0" + return "0.40.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index ec787c820a..da6468c4e5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelhttp +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( "context" @@ -25,7 +25,7 @@ import ( var _ io.ReadCloser = &bodyWrapper{} // bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error +// of bytes read and the last error. type bodyWrapper struct { io.ReadCloser record func(n int64) // must not be nil @@ -91,6 +91,5 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.wroteHeader = true w.statusCode = statusCode - w.props.Inject(w.ctx, propagation.HeaderCarrier(w.Header())) w.ResponseWriter.WriteHeader(statusCode) } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 759cf53e00..0b605b3d67 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -11,11 +11,11 @@ coverage.* gen/ /example/fib/fib +/example/fib/traces.txt /example/jaeger/jaeger /example/namedtracer/namedtracer /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus -/example/prom-collector/prom-collector /example/zipkin/zipkin /example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 7a5fdc07ab..0f099f5759 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,9 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: - - deadcode + - depguard - errcheck + - godot - gofmt - goimports - gosimple @@ -19,29 +20,225 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck - issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. exclude-rules: - # helpers in tests often (rightfully) pass a *testing.T as their first argument - - path: _test\.go - text: "context.Context should be the first parameter of a function" + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" linters: - revive - # Yes, they are, but it's okay in a test + # Yes, they are, but it's okay in a test. - path: _test\.go text: "exported func.*returns unexported type.*which can be annoying to use" linters: - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 linters-settings: + depguard: + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. + # Default: [] + packages: + - "crypto/md5" + - "crypto/sha1" + - "crypto/**/pkix" + ignore-file-rules: + - "**/*_test.go" + additional-guards: + # Do not allow testing packages in non-test files. + - list-type: denylist + include-go-root: true + packages: + - testing + - github.com/stretchr/testify + ignore-file-rules: + - "**/*_test.go" + - "**/*test/*.go" + - "**/internal/matchers/*.go" + godot: + exclude: + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io misspell: locale: US ignore-words: - cancelled - goimports: - local-prefixes: go.opentelemetry.io + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 0000000000..40d62fa2eb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdown-link.json b/vendor/go.opentelemetry.io/otel/.markdown-link.json deleted file mode 100644 index f222ad89c3..0000000000 --- a/vendor/go.opentelemetry.io/otel/.markdown-link.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^http(s)?://localhost" - } - ], - "replacementPatterns": [ - { - "pattern": "^/registry", - "replacement": "https://opentelemetry.io/registry" - } - ], - "retryOn429": true, - "retryCount": 5, - "fallbackRetryDelay": "30s" -} diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 42dada4905..1d9726f60b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,619 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. + +### Changed + +- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. (#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. 
(#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. 
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. + These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. 
(#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. 
(#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. + This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. 
(#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. 
(#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. (#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. 
(#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shutdown even if one reports an error (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. (#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) +- Move metric no-op implementation form `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead. 
(#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. + The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. (#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. 
(#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. + Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported. (#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values would be used. +- Rename the `gc` runtime name to `go` (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. 
+ The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601) +- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. (#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits, this option does not. + This option will be kept until the next major version incremented release. (#2637) + ## [1.4.1] - 2022-02-16 ### Fixed @@ -1689,7 +2302,26 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
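Several changelog entries above describe the migration from the removed `semconv` helper functions to the `httpconv` package, which is the same set of helpers the updated otelhttp transport now calls. As a rough sketch only, manual client-side instrumentation with those helpers could look like the following; the package name, tracer name, and request construction are illustrative assumptions:

```go
package httpexample

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

// instrumentedGet issues a GET request and records the client-side HTTP
// semantic conventions in the same way the vendored otelhttp transport does.
func instrumentedGet(ctx context.Context, url string) (*http.Response, error) {
	tracer := otel.Tracer("example/httpconv")

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	// httpconv.ClientRequest replaces the removed
	// HTTPClientAttributesFromHTTPRequest helper.
	ctx, span := tracer.Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(httpconv.ClientRequest(req)...),
	)
	defer span.End()

	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		span.RecordError(err)
		return nil, err
	}

	// ClientResponse and ClientStatus replace
	// HTTPAttributesFromHTTPStatusCode and SpanStatusFromHTTPStatusCode.
	span.SetAttributes(httpconv.ClientResponse(resp)...)
	span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
	return resp, nil
}
```

In most services the otelhttp Transport changed earlier in this patch is preferable to hand-rolled instrumentation; the sketch is only meant to make the changelog's function mapping concrete.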
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 [1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 [1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 [1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 @@ -1731,3 +2363,7 @@ It contains api and sdk for trace and meter. 
[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 808755fe20..c4012ed6ca 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7dead7084d..a6928bfdff 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -506,8 +506,9 @@ Approvers: - [Josh MacDonald](https://github.com/jmacd), LightStep - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [David Ashpole](https://github.com/dashpole), Google -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Robert Pająk](https://github.com/pellared), Splunk +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic Maintainers: @@ -515,6 +516,10 @@ Maintainers: - [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Tyler Yahn](https://github.com/MrAlias), Splunk +Emeritus: + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b085561dba..0e6ffa284e 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) GO = go TIMEOUT = 60 @@ -25,7 +25,7 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default +precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage # Tools @@ -45,7 +45,13 @@ SEMCONVGEN = $(TOOLS)/semconvgen $(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -66,7 +72,7 @@ GOJQ = $(TOOLS)/gojq $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) # Build @@ -123,6 +129,7 @@ test-coverage: | $(GOCOVMERGE) (cd "$${dir}" && \ $(GO) list ./... \ | grep -v third_party \ + | grep -v 'semconv/v.*' \ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ $(GO) tool cover -html=coverage.out -o coverage.html); \ done; \ @@ -140,8 +147,8 @@ golangci-lint/%: | $(GOLANGCI_LINT) .PHONY: crosslink crosslink: | $(CROSSLINK) - @echo "cross-linking all go modules" \ - && $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) @@ -149,7 +156,7 @@ go-mod-tidy/%: DIR=$* go-mod-tidy/%: | crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy + && $(GO) mod tidy -compat=1.18 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -159,7 +166,11 @@ lint: misspell lint-modules golangci-lint .PHONY: vanity-import-check vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . + @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . .PHONY: misspell misspell: | $(MISSPELL) @@ -167,7 +178,7 @@ misspell: | $(MISSPELL) .PHONY: license-check license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ done); \ if [ -n "$${licRes}" ]; then \ @@ -175,20 +186,14 @@ license-check: exit 1; \ fi -DEPENDABOT_PATH=./.github/dependabot.yml +DEPENDABOT_CONFIG = .github/dependabot.yml .PHONY: dependabot-check -dependabot-check: - @result=$$( \ - for f in $$( find . 
-type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ - do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \ - || echo "$$f"; \ - done; \ - ); \ - if [ -n "$$result" ]; then \ - echo "missing dependabot entry:"; echo "$$result"; \ - echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \ - exit 1; \ - fi +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) .PHONY: check-clean-work-tree check-clean-work-tree: @@ -200,6 +205,16 @@ check-clean-work-tree: exit 1; \ fi +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + .PHONY: prerelease prerelease: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 21c2a71612..878d87e58b 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -30,27 +30,41 @@ Project versioning information and stability guarantees can be found in the ### Compatibility -OpenTelemetry-Go attempts to track the current supported versions of the -[Go language](https://golang.org/doc/devel/release#policy). The release -schedule after a new minor version of go is as follows: +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): -- The first release or one month, which ever is sooner, will add build steps for the new go version. -- The first release after three months will remove support for the oldest go version. +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. -This project is tested on the following systems. +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. 
| OS | Go Version | Architecture | | ------- | ---------- | ------------ | -| Ubuntu | 1.17 | amd64 | -| Ubuntu | 1.16 | amd64 | -| Ubuntu | 1.17 | 386 | -| Ubuntu | 1.16 | 386 | -| MacOS | 1.17 | amd64 | -| MacOS | 1.16 | amd64 | -| Windows | 1.17 | amd64 | -| Windows | 1.16 | amd64 | -| Windows | 1.17 | 386 | -| Windows | 1.16 | 386 | +| Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.19 | amd64 | +| Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.20 | 386 | +| Ubuntu | 1.19 | 386 | +| Ubuntu | 1.18 | 386 | +| MacOS | 1.20 | amd64 | +| MacOS | 1.19 | amd64 | +| MacOS | 1.18 | amd64 | +| Windows | 1.20 | amd64 | +| Windows | 1.19 | amd64 | +| Windows | 1.18 | amd64 | +| Windows | 1.20 | 386 | +| Windows | 1.19 | 386 | +| Windows | 1.18 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -76,7 +90,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the -[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) package. The included [examples](./example/) are a good way to see some practical uses of this process. @@ -95,8 +109,6 @@ All officially supported exporters for the OpenTelemetry project are contained i | [stdout](./exporters/stdout/) | ✓ | ✓ | | [Zipkin](./exporters/zipkin/) | | ✓ | -Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). - ## Contributing See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index e3bff66c6a..77d56c9365 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -2,35 +2,28 @@ ## Semantic Convention Generation -If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new -semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility -installed by `make tools` that can be used to generate the a package with the name matching the specification -version number under the `semconv` package. This will ideally be done soon after the specification release is -tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the -generated files match the released semantic conventions. +New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. -There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. +1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. -``` -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2 -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 +For example, + +```sh +export TAG="v1.13.0" # Change to the release version you are generating. 
+export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. ``` -Using default values for all options other than `input` will result in using the `template.j2` template to -generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`. +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. -There are several ancillary files that are not generated and should be copied into the new package from the -prior package, with updates made as appropriate to canonical import path statements and constant values. -These files include: - -* doc.go -* exception.go -* http(_test)?.go -* schema.go - -Uses of the previous schema version in this repository should be updated to use the newly generated version. -No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed` -pipeline if you like living on the edge. +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). ## Pre-Release @@ -130,3 +123,5 @@ Once verified be sure to [make a release for the `contrib` repository](https://g Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 8b940f78dc..fe2bc5766c 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -21,19 +21,17 @@ import ( ) type ( - // Encoder is a mechanism for serializing a label set into a - // specific string representation that supports caching, to - // avoid repeated serialization. An example could be an - // exporter encoding the label set into a wire representation. + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. Encoder interface { - // Encode returns the serialized encoding of the label - // set using its Iterator. This result may be cached - // by a attribute.Set. + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. Encode(iterator Iterator) string - // ID returns a value that is unique for each class of - // label encoder. Label encoders allocate these using - // `NewEncoderID`. + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. ID() EncoderID } @@ -43,54 +41,53 @@ type ( value uint64 } - // defaultLabelEncoder uses a sync.Pool of buffers to reduce - // the number of allocations used in encoding labels. 
This - // implementation encodes a comma-separated list of key=value, - // with '/'-escaping of '=', ',', and '\'. - defaultLabelEncoder struct { - // pool is a pool of labelset builders. The buffers in this - // pool grow to a size that most label encodings will not - // allocate new memory. + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. pool sync.Pool // *bytes.Buffer } ) -// escapeChar is used to ensure uniqueness of the label encoding where -// keys or values contain either '=' or ','. Since there is no parser -// needed for this encoding and its only requirement is to be unique, -// this choice is arbitrary. Users will see these in some exporters -// (e.g., stdout), so the backslash ('\') is used as a conventional choice. +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. const escapeChar = '\\' var ( - _ Encoder = &defaultLabelEncoder{} + _ Encoder = &defaultAttrEncoder{} - // encoderIDCounter is for generating IDs for other label - // encoders. + // encoderIDCounter is for generating IDs for other attribute encoders. encoderIDCounter uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultLabelEncoder + defaultEncoderInstance *defaultAttrEncoder ) -// NewEncoderID returns a unique label encoder ID. It should be -// called once per each type of label encoder. Preferably in init() or -// in var definition. +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. func NewEncoderID() EncoderID { return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} } -// DefaultEncoder returns a label encoder that encodes labels -// in such a way that each escaped label's key is followed by an equal -// sign and then by an escaped label's value. All key-value pairs are -// separated by a comma. +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. // -// Escaping is done by prepending a backslash before either a -// backslash, equal sign or a comma. +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultLabelEncoder{ + defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ New: func() interface{} { return &bytes.Buffer{} @@ -101,15 +98,14 @@ func DefaultEncoder() Encoder { return defaultEncoderInstance } -// Encode is a part of an implementation of the LabelEncoder -// interface. -func (d *defaultLabelEncoder) Encode(iter Iterator) string { +// Encode is a part of an implementation of the AttributeEncoder interface. 
+func (d *defaultAttrEncoder) Encode(iter Iterator) string { buf := d.pool.Get().(*bytes.Buffer) defer d.pool.Put(buf) buf.Reset() for iter.Next() { - i, keyValue := iter.IndexedLabel() + i, keyValue := iter.IndexedAttribute() if i > 0 { _, _ = buf.WriteRune(',') } @@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string { return buf.String() } -// ID is a part of an implementation of the LabelEncoder interface. -func (*defaultLabelEncoder) ID() EncoderID { +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { return defaultEncoderID } @@ -137,9 +133,9 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - buf.WriteRune(escapeChar) + _, _ = buf.WriteRune(escapeChar) } - buf.WriteRune(ch) + _, _ = buf.WriteRune(ch) } } diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index e03aabb62b..841b271fb7 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -14,16 +14,16 @@ package attribute // import "go.opentelemetry.io/otel/attribute" -// Iterator allows iterating over the set of labels in order, -// sorted by key. +// Iterator allows iterating over the set of attributes in order, sorted by +// key. type Iterator struct { storage *Set idx int } -// MergeIterator supports iterating over two sets of labels while -// eliminating duplicate values from the combined set. The first -// iterator value takes precedence. +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. type MergeIterator struct { one oneIterator two oneIterator @@ -31,13 +31,13 @@ type MergeIterator struct { } type oneIterator struct { - iter Iterator - done bool - label KeyValue + iter Iterator + done bool + attr KeyValue } -// Next moves the iterator to the next position. Returns false if there -// are no more labels. +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -45,30 +45,41 @@ func (i *Iterator) Next() bool { // Label returns current KeyValue. Must be called only after Next returns // true. +// +// Deprecated: Use Attribute instead. func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { kv, _ := i.storage.Get(i.idx) return kv } -// Attribute is a synonym for Label(). -func (i *Iterator) Attribute() KeyValue { - return i.Label() -} - // IndexedLabel returns current index and attribute. Must be called only // after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Label() + return i.idx, i.Attribute() } -// Len returns a number of labels in the iterator's `*Set`. +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. 
func (i *Iterator) Len() int { return i.storage.Len() } -// ToSlice is a convenience function that creates a slice of labels -// from the passed iterator. The iterator is set up to start from the -// beginning before creating the slice. +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. func (i *Iterator) ToSlice() []KeyValue { l := i.Len() if l == 0 { @@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue { i.idx = -1 slice := make([]KeyValue, 0, l) for i.Next() { - slice = append(slice, i.Label()) + slice = append(slice, i.Attribute()) } return slice } -// NewMergeIterator returns a MergeIterator for merging two label sets +// NewMergeIterator returns a MergeIterator for merging two attribute sets. // Duplicates are resolved by taking the value from the first set. func NewMergeIterator(s1, s2 *Set) MergeIterator { mi := MergeIterator{ @@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator { func (oi *oneIterator) advance() { if oi.done = !oi.iter.Next(); !oi.done { - oi.label = oi.iter.Label() + oi.attr = oi.iter.Attribute() } } -// Next returns true if there is another label available. +// Next returns true if there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false } if m.one.done { - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } if m.two.done { - m.current = m.one.label + m.current = m.one.attr m.one.advance() return true } - if m.one.label.Key == m.two.label.Key { - m.current = m.one.label // first iterator label value wins + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins m.one.advance() m.two.advance() return true } - if m.one.label.Key < m.two.label.Key { - m.current = m.one.label + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr m.one.advance() return true } - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } // Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. func (m *MergeIterator) Label() KeyValue { return m.current } + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index a28f1435cb..26be598322 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -21,49 +21,42 @@ import ( ) type ( - // Set is the representation for a distinct label set. It - // manages an immutable set of labels, with an internal cache - // for storing label encodings. + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. // - // This type supports the `Equivalent` method of comparison - // using values of type `Distinct`. - // - // This type is used to implement: - // 1. Metric labels - // 2. Resource sets - // 3. Correlation map (TODO) + // This type supports the Equivalent method of comparison using values of + // type Distinct. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of `KeyValue`, - // constructed with keys in sorted order. This can be used as - // a map key or for equality checking between Sets. 
+ // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. Distinct struct { iface interface{} } - // Filter supports removing certain labels from label sets. - // When the filter returns true, the label will be kept in - // the filtered label set. When the filter returns false, the - // label is excluded from the filtered label set, and the - // label instead appears in the `removed` list of excluded labels. + // Filter supports removing certain attributes from attribute sets. When + // the filter returns true, the attribute will be kept in the filtered + // attribute set. When the filter returns false, the attribute is excluded + // from the filtered attribute set, and the attribute instead appears in + // the removed list of excluded attributes. Filter func(KeyValue) bool - // Sortable implements `sort.Interface`, used for sorting - // `KeyValue`. This is an exported type to support a - // memory optimization. A pointer to one of these is needed - // for the call to `sort.Stable()`, which the caller may - // provide in order to avoid an allocation. See - // `NewSetWithSortable()`. + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). Sortable []KeyValue ) var ( - // keyValueType is used in `computeDistinctReflect`. + // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty label sets. + // emptySet is returned for empty attribute sets. emptySet = &Set{ equivalent: Distinct{ iface: [0]KeyValue{}, @@ -78,30 +71,30 @@ func EmptySet() *Set { return emptySet } -// reflect abbreviates `reflect.ValueOf`. -func (d Distinct) reflect() reflect.Value { +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid `*Set`. +// Valid returns true if this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } -// Len returns the number of labels in this set. +// Len returns the number of attributes in this set. func (l *Set) Len() int { if l == nil || !l.equivalent.Valid() { return 0 } - return l.equivalent.reflect().Len() + return l.equivalent.reflectValue().Len() } -// Get returns the KeyValue at ordered position `idx` in this set. +// Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { if l == nil { return KeyValue{}, false } - value := l.equivalent.reflect() + value := l.equivalent.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -117,7 +110,7 @@ func (l *Set) Value(k Key) (Value, bool) { if l == nil { return Value{}, false } - rValue := l.equivalent.reflect() + rValue := l.equivalent.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -142,7 +135,7 @@ func (l *Set) HasValue(k Key) bool { return ok } -// Iter returns an iterator for visiting the labels in this set. +// Iter returns an iterator for visiting the attributes in this set. 
func (l *Set) Iter() Iterator { return Iterator{ storage: l, @@ -150,18 +143,17 @@ func (l *Set) Iter() Iterator { } } -// ToSlice returns the set of labels belonging to this set, sorted, -// where keys appear no more than once. +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. func (l *Set) ToSlice() []KeyValue { iter := l.Iter() return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The -// Distinct type guarantees that the result will equal the equivalent -// Distinct value of any label set with the same elements as this, -// where sets are made unique by choosing the last value in the input -// for any given key. +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { if l == nil || !l.equivalent.Valid() { return emptySet.equivalent @@ -174,8 +166,7 @@ func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } -// Encoded returns the encoded form of this set, according to -// `encoder`. +// Encoded returns the encoded form of this set, according to encoder. func (l *Set) Encoded(encoder Encoder) string { if l == nil || encoder == nil { return "" @@ -190,11 +181,11 @@ func empty() Set { } } -// NewSet returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// Except for empty sets, this method adds an additional allocation -// compared with calls that include a `*Sortable`. +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { // Check for empty set. if len(kvs) == 0 { @@ -204,10 +195,10 @@ func NewSet(kvs ...KeyValue) Set { return s } -// NewSetWithSortable returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `*Sortable` option as a memory optimization. +// This call includes a Sortable option as a memory optimization. func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { // Check for empty set. if len(kvs) == 0 { @@ -217,12 +208,11 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { return s } -// NewSetWithFiltered returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `Filter` to include/exclude label keys from -// the return value. Excluded keys are returned as a slice of label -// values. +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. 
if len(kvs) == 0 { @@ -231,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { return NewSetWithSortableFiltered(kvs, new(Sortable), filter) } -// NewSetWithSortableFiltered returns a new `Set`. +// NewSetWithSortableFiltered returns a new Set. // // Duplicate keys are eliminated by taking the last value. This // re-orders the input slice so that unique last-values are contiguous @@ -243,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // - Caller sees the reordering, but doesn't lose values // - Repeated call preserve last-value wins. // -// Note that methods are defined on `*Set`, although this returns `Set`. -// Callers can avoid memory allocations by: +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: // -// - allocating a `Sortable` for use as a temporary in this method -// - allocating a `Set` for storing the return value of this -// constructor. +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. // -// The result maintains a cache of encoded labels, by attribute.EncoderID. +// The result maintains a cache of encoded attributes, by attribute.EncoderID. // This value should not be copied after its first use. // -// The second `[]KeyValue` return value is a list of labels that were +// The second []KeyValue return value is a list of attributes that were // excluded by the Filter (if non-nil). func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { // Check for empty set. @@ -293,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S }, nil } -// filterSet reorders `kvs` so that included keys are contiguous at -// the end of the slice, while excluded keys precede the included keys. +// filterSet reorders kvs so that included keys are contiguous at the end of +// the slice, while excluded keys precede the included keys. func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { var excluded []KeyValue - // Move labels that do not match the filter so - // they're adjacent before calling computeDistinct(). + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). distinctPosition := len(kvs) // Swap indistinct keys forward and distinct keys toward the @@ -319,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { }, excluded } -// Filter returns a filtered copy of this `Set`. See the -// documentation for `NewSetWithSortableFiltered` for more details. +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { return Set{ @@ -333,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { return filterSet(l.ToSlice(), re) } -// computeDistinct returns a `Distinct` using either the fixed- or -// reflect-oriented code path, depending on the size of the input. -// The input slice is assumed to already be sorted and de-duplicated. +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. 
func computeDistinct(kvs []KeyValue) Distinct { iface := computeDistinctFixed(kvs) if iface == nil { @@ -346,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct { } } -// computeDistinctFixed computes a `Distinct` for small slices. It -// returns nil if the input is too large for this code path. +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: @@ -395,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { } } -// computeDistinctReflect computes a `Distinct` using reflection, -// works for any size input. +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. func computeDistinctReflect(kvs []KeyValue) interface{} { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { @@ -405,7 +394,7 @@ func computeDistinctReflect(kvs []KeyValue) interface{} { return at.Interface() } -// MarshalJSON returns the JSON encoding of the `*Set`. +// MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } @@ -419,17 +408,17 @@ func (l Set) MarshalLog() interface{} { return kvs } -// Len implements `sort.Interface`. +// Len implements sort.Interface. func (l *Sortable) Len() int { return len(*l) } -// Swap implements `sort.Interface`. +// Swap implements sort.Interface. func (l *Sortable) Swap(i, j int) { (*l)[i], (*l)[j] = (*l)[j], (*l)[i] } -// Less implements `sort.Interface`. +// Less implements sort.Interface. func (l *Sortable) Less(i, j int) bool { return (*l)[i].Key < (*l)[j].Key } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 6ec5cb290d..cb21dd5c09 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -17,15 +17,17 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" + "reflect" "strconv" "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" ) //go:generate stringer -type=Type // Type describes the type of the data Value holds. -type Type int +type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. type Value struct { @@ -66,12 +68,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - cp := make([]bool, len(v)) - copy(cp, v) - return Value{ - vtype: BOOLSLICE, - slice: &cp, - } + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} } // IntValue creates an INT64 Value. @@ -81,13 +78,14 @@ func IntValue(v int) Value { // IntSliceValue creates an INTSLICE Value. func IntSliceValue(v []int) Value { - cp := make([]int64, 0, len(v)) - for _, i := range v { - cp = append(cp, int64(i)) + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) } return Value{ vtype: INT64SLICE, - slice: &cp, + slice: cp.Elem().Interface(), } } @@ -101,12 +99,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. 
func Int64SliceValue(v []int64) Value { - cp := make([]int64, len(v)) - copy(cp, v) - return Value{ - vtype: INT64SLICE, - slice: &cp, - } + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -119,12 +112,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - cp := make([]float64, len(v)) - copy(cp, v) - return Value{ - vtype: FLOAT64SLICE, - slice: &cp, - } + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} } // StringValue creates a STRING Value. @@ -137,12 +125,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - cp := make([]string, len(v)) - copy(cp, v) - return Value{ - vtype: STRINGSLICE, - slice: &cp, - } + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} } // Type returns a type of the Value. @@ -159,10 +142,14 @@ func (v Value) AsBool() bool { // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { - if s, ok := v.slice.(*[]bool); ok { - return *s + if v.vtype != BOOLSLICE { + return nil } - return nil + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 { // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { - if s, ok := v.slice.(*[]int64); ok { - return *s + if v.vtype != INT64SLICE { + return nil } - return nil + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 { // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. func (v Value) AsFloat64Slice() []float64 { - if s, ok := v.slice.(*[]float64); ok { - return *s + if v.vtype != FLOAT64SLICE { + return nil } - return nil + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -204,10 +199,14 @@ func (v Value) AsString() string { // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. 
func (v Value) AsStringSlice() []string { - if s, ok := v.slice.(*[]string); ok { - return *s + if v.vtype != STRINGSLICE { + return nil } - return nil + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) } type unknownValueType struct{} @@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} { case BOOL: return v.AsBool() case BOOLSLICE: - return v.AsBoolSlice() + return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: - return v.AsInt64Slice() + return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: - return v.AsFloat64Slice() + return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: - return v.AsStringSlice() + return v.asStringSlice() } return unknownValueType{} } @@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} { func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: - return fmt.Sprint(*(v.slice.(*[]bool))) + return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(*(v.slice.(*[]int64))) + return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(*(v.slice.(*[]float64))) + return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(*(v.slice.(*[]string))) + return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 824c67b27a..a36db8f8d8 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -68,6 +68,9 @@ type Property struct { hasData bool } +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. func NewKeyProperty(key string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -77,6 +80,9 @@ func NewKeyProperty(key string) (Property, error) { return p, nil } +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. func NewKeyValueProperty(key, value string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -151,7 +157,7 @@ func (p Property) Key() string { return p.key } -// Value returns the Property value. Additionally a boolean value is returned +// Value returns the Property value. Additionally, a boolean value is returned // indicating if the returned value is the empty if the Property has a value // that is empty or if the value is not set. func (p Property) Value() (string, bool) { @@ -244,8 +250,9 @@ type Member struct { hasData bool } -// NewMember returns a new Member from the passed arguments. An error is -// returned if the created Member would be invalid according to the W3C +// NewMember returns a new Member from the passed arguments. The key will be +// used directly while the value will be url decoded after validation. An error +// is returned if the created Member would be invalid according to the W3C // Baggage specification. 
func NewMember(key, value string, props ...Property) (Member, error) { m := Member{ @@ -257,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - + decodedValue, err := url.QueryUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + m.value = decodedValue return m, nil } @@ -322,8 +333,9 @@ func parseMember(member string) (Member, error) { return Member{key: key, value: value, properties: props, hasData: true}, nil } -// validate ensures m conforms to the W3C Baggage specification, returning an -// error otherwise. +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. func (m Member) validate() error { if !m.hasData { return fmt.Errorf("%w: %q", errInvalidMember, m) @@ -386,7 +398,7 @@ func New(members ...Member) (Baggage, error) { } } - // Check member numbers after deduplicating. + // Check member numbers after deduplication. if len(b) > maxMembers { return Baggage{}, errMemberNumber } @@ -448,7 +460,7 @@ func Parse(bStr string) (Baggage, error) { func (b Baggage) Member(key string) Member { v, ok := b.list[key] if !ok { - // We do not need to worry about distiguising between the situation + // We do not need to worry about distinguishing between the situation // where a zero-valued Member is included in the Baggage because a // zero-valued Member is invalid according to the W3C Baggage // specification (it has an empty key). @@ -459,6 +471,7 @@ func (b Baggage) Member(key string) Member { key: key, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, } } @@ -478,6 +491,7 @@ func (b Baggage) Members() []Member { key: k, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, }) } return members diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 064a9279fd..587ebae4e3 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -23,10 +23,20 @@ import ( const ( // Unset is the default status code. Unset Code = 0 + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. Error Code = 1 + // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. Ok Code = 2 maxCode = 3 diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go new file mode 100644 index 0000000000..b3fd45d9d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal contains common functionality for all OTLP exporters. +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import ( + "fmt" + "path" + "strings" +) + +// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead. +func CleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go new file mode 100644 index 0000000000..53ff3126b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. 
+func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + nameValue := strings.SplitN(header, "=", 2) + if len(nameValue) < 2 { + global.Error(errors.New("missing '="), "parse headers", "input", nameValue) + continue + } + name, err := url.QueryUnescape(nameValue[0]) + if err != nil { + global.Error(err, "escape header key", "key", nameValue[0]) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.QueryUnescape(nameValue[1]) + if err != nil { + global.Error(err, "escape header value", "value", nameValue[1]) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go new file mode 100644 index 0000000000..9aa62ed9e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal contains common functionality for all OTLP exporters. 
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import "go.opentelemetry.io/otel" + +// GetUserAgentHeader return an OTLP header value form "OTel OTLP Exporter Go/{{ .Version }}" +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#user-agent +func GetUserAgentHeader() string { + return "OTel OTLP Exporter Go/" + otel.Version() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go new file mode 100644 index 0000000000..9ab89b3757 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (ps PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. +func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. +func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "metric data points", + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go index 3d43f7aea9..7e1b0055a7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go @@ -76,21 +76,21 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { } } - // Do not use NewExponentialBackOff since it calls Reset and the code here - // must call Reset after changing the InitialInterval (this saves an - // unnecessary call to Now). 
- b := &backoff.ExponentialBackOff{ - InitialInterval: c.InitialInterval, - RandomizationFactor: backoff.DefaultRandomizationFactor, - Multiplier: backoff.DefaultMultiplier, - MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } - b.Reset() - return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + for { err := fn(ctx) if err == nil { @@ -119,8 +119,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { delay = throttle } - if err := waitFunc(ctx, delay); err != nil { - return err + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) } } } @@ -129,6 +129,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { // Allow override for testing. var waitFunc = wait +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go new file mode 100644 index 0000000000..217751da55 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +// ErrorKind is used to identify the kind of export error +// being wrapped. +type ErrorKind int + +const ( + // TracesExport indicates the error comes from the OTLP trace exporter. + TracesExport ErrorKind = iota +) + +// prefix returns a prefix for the Error() string. +func (k ErrorKind) prefix() string { + switch k { + case TracesExport: + return "traces export: " + default: + return "unknown: " + } +} + +// wrappedExportError wraps an OTLP exporter error with the kind of +// signal that produced it. +type wrappedExportError struct { + wrap error + kind ErrorKind +} + +// WrapTracesError wraps an error from the OTLP exporter for traces. +func WrapTracesError(err error) error { + return wrappedExportError{ + wrap: err, + kind: TracesExport, + } +} + +var _ error = wrappedExportError{} + +// Error attaches a prefix corresponding to the kind of exporter. 
+func (t wrappedExportError) Error() string { + return t.kind.prefix() + t.wrap.Error() +} + +// Unwrap returns the wrapped error. +func (t wrappedExportError) Unwrap() error { + return t.wrap +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md index 8a40a86a24..6e9cc0366b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace ## Examples -- [Exporter setup and examples](./otlptracehttp/example_test.go) -- [Full example sending telemetry to a local collector](../../../example/otel-collector) +- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) +- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) ## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) @@ -38,12 +38,14 @@ override the default configuration. For more information about how each of these environment variables is interpreted, see [the OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). -| Environment variable | Option | Default value | -| ------------------------------------------------------------------------ |------------------------------ | ----------------------------------- | -| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` | -| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | | -| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | | -| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | | -| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` | +| Environment variable | Option | Default value | +| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- | +| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] | +| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | | +| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | | +| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | | +| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` | + +[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`. Configuration using options have precedence over the environment variables. 
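[Reviewer note on the README hunk above] The updated table documents that programmatic options take precedence over the OTEL_EXPORTER_OTLP_* environment variables, and that the HTTP client now defaults to port 4318 while gRPC keeps 4317. As a minimal, illustrative sketch of what that means from the caller's side (using only the public otlptracehttp constructor and options, which are not part of this patch; the endpoint host is made up):

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()
	// Explicit options win over OTEL_EXPORTER_OTLP_ENDPOINT and friends,
	// which in turn win over the built-in default (localhost:4318 for HTTP).
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("collector.example:4318"), // host:port only, no URL path
		otlptracehttp.WithInsecure(),                         // plain HTTP instead of HTTPS
	)
	if err != nil {
		log.Fatalf("creating OTLP/HTTP trace exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
	// In real code the exporter would be registered with an SDK TracerProvider.
}

Anything not set explicitly above would still be read from the environment before the defaults are applied, which matches the precedence order stated in the README.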
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 7e9bb6c47a..b65802edbb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -19,6 +19,7 @@ import ( "errors" "sync" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" tracesdk "go.opentelemetry.io/otel/sdk/trace" ) @@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) return nil } - return e.client.UploadTraces(ctx, protoSpans) + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return internal.WrapTracesError(err) + } + return nil } // Start establishes a connection to the receiving endpoint. @@ -100,3 +105,14 @@ func NewUnstarted(client Client) *Exporter { client: client, } } + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + Client Client + }{ + Type: "otlptrace", + Client: e.client, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go index 77f13a1937..62c5029db2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -16,66 +16,59 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ import ( "crypto/tls" - "fmt" - "io/ioutil" + "crypto/x509" "net/url" "os" "path" - "strconv" "strings" "time" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ) -var DefaultEnvOptionsReader = EnvOptionsReader{ - GetEnv: os.Getenv, - ReadFile: ioutil.ReadFile, +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", } +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { - return DefaultEnvOptionsReader.ApplyGRPCEnvConfigs(cfg) -} - -func ApplyHTTPEnvConfigs(cfg Config) Config { - return DefaultEnvOptionsReader.ApplyHTTPEnvConfigs(cfg) -} - -type EnvOptionsReader struct { - GetEnv func(string) string - ReadFile func(filename string) ([]byte, error) -} - -func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg Config) Config { - opts := e.GetOptionsFromEnv() - for _, opt := range opts { - cfg = opt.ApplyHTTPOption(cfg) - } - return cfg -} - -func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg Config) Config { - opts := e.GetOptionsFromEnv() + opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } -func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { - var opts []GenericOption +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} - // Endpoint - if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok { - u, err := url.Parse(v) - // Ignore invalid values. 
- if err == nil { - // This is used to set the scheme for OTLP/HTTP. - if insecureSchema(u.Scheme) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) - } +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the @@ -88,140 +81,70 @@ func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { } cfg.Traces.URLPath = path return cfg - }, func(cfg Config) Config { - // For OTLP/gRPC endpoints, this is the target to which the - // exporter is going to send telemetry. - cfg.Traces.Endpoint = path.Join(u.Host, u.Path) - return cfg - })) - } - } else if v, ok = e.getEnvValue("ENDPOINT"); ok { - u, err := url.Parse(v) - // Ignore invalid values. - if err == nil { - // This is used to set the scheme for OTLP/HTTP. - if insecureSchema(u.Scheme) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) - } - opts = append(opts, newSplitOption(func(cfg Config) Config { - cfg.Traces.Endpoint = u.Host - // For OTLP/HTTP endpoint URLs without a per-signal - // configuration, the passed endpoint is used as a base URL - // and the signals are sent to these paths relative to that. - cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) - return cfg - }, func(cfg Config) Config { - // For OTLP/gRPC endpoints, this is the target to which the - // exporter is going to send telemetry. 
- cfg.Traces.Endpoint = path.Join(u.Host, u.Path) - return cfg - })) - } - } - - // Certificate File - if path, ok := e.getEnvValue("CERTIFICATE"); ok { - if tls, err := e.readTLSConfig(path); err == nil { - opts = append(opts, WithTLSClientConfig(tls)) - } else { - otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err)) - } - } - if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok { - if tls, err := e.readTLSConfig(path); err == nil { - opts = append(opts, WithTLSClientConfig(tls)) - } else { - otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err)) - } - } - - // Headers - if h, ok := e.getEnvValue("HEADERS"); ok { - opts = append(opts, WithHeaders(stringToHeader(h))) - } - if h, ok := e.getEnvValue("TRACES_HEADERS"); ok { - opts = append(opts, WithHeaders(stringToHeader(h))) - } - - // Compression - if c, ok := e.getEnvValue("COMPRESSION"); ok { - opts = append(opts, WithCompression(stringToCompression(c))) - } - if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok { - opts = append(opts, WithCompression(stringToCompression(c))) - } - // Timeout - if t, ok := e.getEnvValue("TIMEOUT"); ok { - if d, err := strconv.Atoi(t); err == nil { - opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) - } - } - if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok { - if d, err := strconv.Atoi(t); err == nil { - opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) - } - } + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) return opts } -func insecureSchema(schema string) bool { - switch strings.ToLower(schema) { +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { case "http", "unix": - return true + return WithInsecure() default: - return false + return WithSecure() } } -// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function. -// This function already prepends the OTLP prefix to all key lookup. 
-func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) { - v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key))) - return v, v != "" -} - -func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) { - b, err := e.ReadFile(path) - if err != nil { - return nil, err +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg } - return CreateTLSConfig(b) } -func stringToCompression(value string) Compression { - switch value { - case "gzip": - return GzipCompression - } +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. +func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } - return NoCompression -} - -func stringToHeader(value string) map[string]string { - headersPairs := strings.Split(value, ",") - headers := make(map[string]string) - - for _, header := range headersPairs { - nameValue := strings.SplitN(header, "=", 2) - if len(nameValue) < 2 { - continue + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) } - name, err := url.QueryUnescape(nameValue[0]) - if err != nil { - continue - } - trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(nameValue[1]) - if err != nil { - continue - } - trimmedValue := strings.TrimSpace(value) - - headers[trimmedName] = trimmedValue } - - return headers } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go index e6fb14e00e..c48ffd5308 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ) @@ -65,24 +66,39 @@ type ( } ) -func NewDefaultConfig() Config { - c := Config{ +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. 
+func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ Traces: SignalConfig{ - Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorPort), + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, } - - return c + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { - cfg := NewDefaultConfig() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())}, + } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go index f69e31095d..c2d6c03615 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go @@ -15,9 +15,10 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" const ( - // DefaultCollectorPort is the port the Exporter will attempt connect to - // if no collector port is provided. - DefaultCollectorPort uint16 = 4317 + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" @@ -36,7 +37,7 @@ const ( GzipCompression ) -// Marshaler describes the kind of message format sent to the collector +// Marshaler describes the kind of message format sent to the collector. type Marshaler int const ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index d9086a390d..ec74f1aad7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -48,8 +48,8 @@ func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { } // ResourceAttributes transforms a Resource OTLP key-values. -func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue { - return Iterator(resource.Iter()) +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) } // KeyValue transforms an attribute KeyValue into an OTLP key-value. 
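[Reviewer note on the otlpconfig environment handling earlier in this patch] The new getOptionsFromEnv/withEndpointForGRPC code derives per-protocol endpoints from a single OTEL_EXPORTER_OTLP_ENDPOINT URL: OTLP/HTTP keeps the host and treats any URL path as a base for the /v1/traces signal path, while OTLP/gRPC joins host and path into a dial target. A standalone sketch of that derivation, for illustration only (the example URL is hypothetical and the real logic lives in the vendored code above):

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Hypothetical OTEL_EXPORTER_OTLP_ENDPOINT value.
	u, err := url.Parse("https://collector.example:4318/base")
	if err != nil {
		panic(err)
	}

	// OTLP/HTTP: endpoint is host:port, traces are posted to <path>/v1/traces.
	httpEndpoint := u.Host
	httpURLPath := path.Join(u.Path, "/v1/traces")

	// OTLP/gRPC: the host and any path are joined into the dial target.
	grpcTarget := path.Join(u.Host, u.Path)

	fmt.Println(httpEndpoint, httpURLPath) // collector.example:4318 /base/v1/traces
	fmt.Println(grpcTarget)                // collector.example:4318/base
}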
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 6246b17f57..7aaec38d22 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -19,11 +19,11 @@ import ( commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) -func InstrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary { - if il == (instrumentation.Library{}) { +func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { + if il == (instrumentation.Scope{}) { return nil } - return &commonpb.InstrumentationLibrary{ + return &commonpb.InstrumentationScope{ Name: il.Name, Version: il.Version, } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 2f0f5eacb7..b83cbd7247 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -23,10 +23,6 @@ import ( tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) -const ( - maxEventsPerSpan = 128 -) - // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP // ResourceSpans. func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { @@ -36,11 +32,11 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) - type ilsKey struct { + type key struct { r attribute.Distinct - il instrumentation.Library + is instrumentation.Scope } - ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans) + ssm := make(map[key]*tracepb.ScopeSpans) var resources int for _, sd := range sdl { @@ -49,30 +45,30 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { } rKey := sd.Resource().Equivalent() - iKey := ilsKey{ + k := key{ r: rKey, - il: sd.InstrumentationLibrary(), + is: sd.InstrumentationScope(), } - ils, iOk := ilsm[iKey] + scopeSpan, iOk := ssm[k] if !iOk { - // Either the resource or instrumentation library were unknown. - ils = &tracepb.InstrumentationLibrarySpans{ - InstrumentationLibrary: InstrumentationLibrary(sd.InstrumentationLibrary()), - Spans: []*tracepb.Span{}, - SchemaUrl: sd.InstrumentationLibrary().SchemaURL, + // Either the resource or instrumentation scope were unknown. + scopeSpan = &tracepb.ScopeSpans{ + Scope: InstrumentationScope(sd.InstrumentationScope()), + Spans: []*tracepb.Span{}, + SchemaUrl: sd.InstrumentationScope().SchemaURL, } } - ils.Spans = append(ils.Spans, span(sd)) - ilsm[iKey] = ils + scopeSpan.Spans = append(scopeSpan.Spans, span(sd)) + ssm[k] = scopeSpan rs, rOk := rsm[rKey] if !rOk { resources++ // The resource was unknown. 
rs = &tracepb.ResourceSpans{ - Resource: Resource(sd.Resource()), - InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils}, - SchemaUrl: sd.Resource().SchemaURL(), + Resource: Resource(sd.Resource()), + ScopeSpans: []*tracepb.ScopeSpans{scopeSpan}, + SchemaUrl: sd.Resource().SchemaURL(), } rsm[rKey] = rs continue @@ -82,9 +78,9 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { // library lookup was unknown because if so we need to add it to the // ResourceSpans. Otherwise, the instrumentation library has already // been seen and the append we did above will be included it in the - // InstrumentationLibrarySpans reference. + // ScopeSpans reference. if !iOk { - rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils) + rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan) } } @@ -162,9 +158,10 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { sid := otLink.SpanContext.SpanID() sl = append(sl, &tracepb.Span_Link{ - TraceId: tid[:], - SpanId: sid[:], - Attributes: KeyValues(otLink.Attributes), + TraceId: tid[:], + SpanId: sid[:], + Attributes: KeyValues(otLink.Attributes), + DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), }) } return sl @@ -176,29 +173,16 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { return nil } - evCount := len(es) - if evCount > maxEventsPerSpan { - evCount = maxEventsPerSpan - } - events := make([]*tracepb.Span_Event, 0, evCount) - nEvents := 0 - + events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for _, e := range es { - if nEvents >= maxEventsPerSpan { - break + for i := 0; i < len(es); i++ { + events[i] = &tracepb.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(es[i].Time.UnixNano()), + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), } - nEvents++ - events = append(events, - &tracepb.Span_Event{ - Name: e.Name, - TimeUnixNano: uint64(e.Time.UnixNano()), - Attributes: KeyValues(e.Attributes), - // TODO (rghetia) : Add Drop Counts when supported. - }, - ) } - return events } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index d709ffa96e..fe23f8e376 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -26,6 +26,8 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" @@ -196,9 +198,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc defer cancel() return c.requestFunc(ctx, func(iCtx context.Context) error { - _, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } // nil is converted to OK. if status.Code(err) == codes.OK { // Success. 
@@ -265,11 +275,22 @@ func retryable(err error) (bool, time.Duration) { // throttleDelay returns a duration to wait for if an explicit throttle time // is included in the response status. -func throttleDelay(status *status.Status) time.Duration { - for _, detail := range status.Details() { +func throttleDelay(s *status.Status) time.Duration { + for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { return t.RetryDelay.AsDuration() } } return 0 } + +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (c *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + }{ + Type: "otlphttpgrpc", + Endpoint: c.endpoint, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index e2e5bd696f..3d09ce590d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -84,8 +84,7 @@ func WithReconnectionPeriod(rp time.Duration) Option { } func compressorToCompression(compressor string) otlpconfig.Compression { - switch compressor { - case "gzip": + if compressor == "gzip" { return otlpconfig.GzipCompression } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 81487f9b6f..9fbe861717 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -20,18 +20,17 @@ import ( "context" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" - "path" "strconv" - "strings" "sync" "time" "google.golang.org/protobuf/proto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" @@ -43,7 +42,7 @@ const contentTypeProto = "application/x-protobuf" var gzPool = sync.Pool{ New: func() interface{} { - w := gzip.NewWriter(ioutil.Discard) + w := gzip.NewWriter(io.Discard) return w }, } @@ -79,26 +78,7 @@ var _ otlptrace.Client = (*client)(nil) // NewClient creates a new HTTP trace client. func NewClient(opts ...Option) otlptrace.Client { - cfg := otlpconfig.NewDefaultConfig() - cfg = otlpconfig.ApplyHTTPEnvConfigs(cfg) - for _, opt := range opts { - cfg = opt.applyHTTPOption(cfg) - } - - for pathPtr, defaultPath := range map[*string]string{ - &cfg.Traces.URLPath: otlpconfig.DefaultTracesPath, - } { - tmp := strings.TrimSpace(*pathPtr) - if tmp == "" { - tmp = defaultPath - } else { - tmp = path.Clean(tmp) - if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) - } - } - *pathPtr = tmp - } + cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) httpClient := &http.Client{ Transport: ourTransport, @@ -121,7 +101,7 @@ func NewClient(opts ...Option) otlptrace.Client { } } -// Start does nothing in a HTTP client +// Start does nothing in a HTTP client. 
func (d *client) Start(ctx context.Context) error { // nothing to do select { @@ -176,28 +156,49 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc return err } - var rErr error + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } + switch resp.StatusCode { case http.StatusOK: // Success, do not retry. - case http.StatusTooManyRequests, - http.StatusServiceUnavailable: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - _ = resp.Body.Close() + // Read the partial success message, if any. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - default: - rErr = fmt.Errorf("failed to send %s to %s: %s", d.name, request.URL, resp.Status) - } - if err := resp.Body.Close(); err != nil { - return err + if respData.Len() != 0 { + var respProto coltracepb.ExportTraceServiceResponse + if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { + return err + } + + if respProto.PartialSuccess != nil { + msg := respProto.PartialSuccess.GetErrorMessage() + n := respProto.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } + } + return nil + + case http.StatusTooManyRequests, http.StatusServiceUnavailable: + // Retry-able failures. Drain the body to reuse the connection. + if _, err := io.Copy(io.Discard, resp.Body); err != nil { + otel.Handle(err) + } + return newResponseError(resp.Header) + default: + return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) } - return rErr }) } @@ -208,6 +209,8 @@ func (d *client) newRequest(body []byte) (request, error) { return request{Request: r}, err } + r.Header.Set("User-Agent", internal.GetUserAgentHeader()) + for k, v := range d.cfg.Headers { r.Header.Set(k, v) } @@ -243,10 +246,23 @@ func (d *client) newRequest(body []byte) (request, error) { return req, nil } +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (d *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + Insecure bool + }{ + Type: "otlphttphttp", + Endpoint: d.cfg.Endpoint, + Insecure: d.cfg.Insecure, + } +} + // bodyReader returns a closure returning a new reader for buf. func bodyReader(buf []byte) func() io.ReadCloser { return func() io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader(buf)) + return io.NopCloser(bytes.NewReader(buf)) } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index e550cfb5d5..4257ce3473 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -40,6 +40,14 @@ type Option interface { applyHTTPOption(otlpconfig.Config) otlpconfig.Config } +func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption { + converted := make([]otlpconfig.HTTPOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption) + } + return converted +} + // RetryConfig defines configuration for retrying batches in case of export // failure using an exponential backoff. 
type RetryConfig retry.Config @@ -55,7 +63,7 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config // WithEndpoint allows one to set the address of the collector // endpoint that the driver will use to send spans. If // unset, it will instead try to use -// the default endpoint (localhost:4317). Note that the endpoint +// the default endpoint (localhost:4318). Note that the endpoint // must not contain any URL path. func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 35263e01ac..ecd363ab51 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" ) var ( @@ -34,29 +35,26 @@ var ( ) type delegator struct { - lock *sync.RWMutex - eh ErrorHandler + delegate unsafe.Pointer } func (d *delegator) Handle(err error) { - d.lock.RLock() - defer d.lock.RUnlock() - d.eh.Handle(err) + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) } // setDelegate sets the ErrorHandler delegate. func (d *delegator) setDelegate(eh ErrorHandler) { - d.lock.Lock() - defer d.lock.Unlock() - d.eh = eh + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) } func defaultErrorHandler() *delegator { - return &delegator{ - lock: &sync.RWMutex{}, - eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, - } - + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d } // errLogger logs errors if no delegate is set, otherwise they are delegated. @@ -92,7 +90,7 @@ func SetErrorHandler(h ErrorHandler) { globalErrorHandler.setDelegate(h) } -// Handle is a convenience function for ErrorHandler().Handle(err) +// Handle is a convenience function for ErrorHandler().Handle(err). func Handle(err error) { GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000000..622c3ee3f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. 
+func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) + return cp.Elem().Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. +func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 3c2784eea3..4469700d9c 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -39,8 +39,7 @@ type baggageState struct { // Passing nil SetHookFunc creates a context with no set hook to call. 
func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -54,8 +53,7 @@ func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Contex // Passing nil GetHookFunc creates a context with no get hook to call. func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -67,8 +65,7 @@ func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Contex // returns a context without any baggage. func ContextWithList(parent context.Context, list List) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index 0a378476b0..293c08961f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" "github.com/go-logr/logr" "github.com/go-logr/stdr" @@ -27,37 +28,36 @@ import ( // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -var globalLoggerLock = &sync.RWMutex{} +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} // SetLogger overrides the globalLogger with l. // // To see Info messages use a logger with `l.V(1).Enabled() == true` -// To see Debug messages use a logger with `l.V(5).Enabled() == true` +// To see Debug messages use a logger with `l.V(5).Enabled() == true`. func SetLogger(l logr.Logger) { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() - globalLogger = l + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) } // Info prints messages about the general state of the API or SDK. -// This should usually be less then 5 messages a minute +// This should usually be less then 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(1).Info(msg, keysAndValues...) + getLogger().V(1).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.Error(err, msg, keysAndValues...) + getLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(5).Info(msg, keysAndValues...) + getLogger().V(5).Info(msg, keysAndValues...) 
} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index d6b3e900cd..1ad38f828e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -15,6 +15,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( + "errors" "sync" "sync/atomic" @@ -47,17 +48,24 @@ func TracerProvider() trace.TracerProvider { // SetTracerProvider is the internal implementation for global.SetTracerProvider. func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + delegateTraceOnce.Do(func() { - current := TracerProvider() - if current == tp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TracerProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*tracerProvider); ok { + if def, ok := current.(*tracerProvider); ok { def.setDelegate(tp) } - }) globalTracer.Store(tracerProviderHolder{tp: tp}) } @@ -69,15 +77,24 @@ func TextMapPropagator() propagation.TextMapPropagator { // SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + // For the textMapPropagator already returned by TextMapPropagator // delegate to p. delegateTextMapPropagatorOnce.Do(func() { - if current := TextMapPropagator(); current == p { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TextMapPropagator, the global instance cannot be reinstalled") - } else if def, ok := current.(*textMapPropagator); ok { + if def, ok := current.(*textMapPropagator); ok { def.SetDelegate(p) } }) @@ -96,11 +113,3 @@ func defaultPropagatorsValue() *atomic.Value { v.Store(propagatorsHolder{tm: newTextMapPropagator()}) return v } - -// ResetForTest restores the initial global state, for testing purposes. 
-func ResetForTest() { - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - delegateTraceOnce = sync.Once{} - delegateTextMapPropagatorOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go deleted file mode 100644 index 77781ead11..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/metric/global" - -import ( - "context" - "sync" - "sync/atomic" - "unsafe" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/internal/metric/registry" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// This file contains the forwarding implementation of MeterProvider used as -// the default global instance. Metric events using instruments provided by -// this implementation are no-ops until the first Meter implementation is set -// as the global provider. -// -// The implementation here uses Mutexes to maintain a list of active Meters in -// the MeterProvider and Instruments in each Meter, under the assumption that -// these interfaces are not performance-critical. -// -// We have the invariant that setDelegate() will be called before a new -// MeterProvider implementation is registered as the global provider. Mutexes -// in the MeterProvider and Meters ensure that each instrument has a delegate -// before the global provider is set. -// -// Metric uniqueness checking is implemented by calling the exported -// methods of the api/metric/registry package. - -type meterKey struct { - InstrumentationName string - InstrumentationVersion string - SchemaURL string -} - -type meterProvider struct { - delegate metric.MeterProvider - - // lock protects `delegate` and `meters`. - lock sync.Mutex - - // meters maintains a unique entry for every named Meter - // that has been registered through the global instance. 
- meters map[meterKey]*meterEntry -} - -type meterImpl struct { - delegate unsafe.Pointer // (*metric.MeterImpl) - - lock sync.Mutex - syncInsts []*syncImpl - asyncInsts []*asyncImpl -} - -type meterEntry struct { - unique sdkapi.MeterImpl - impl meterImpl -} - -type instrument struct { - descriptor sdkapi.Descriptor -} - -type syncImpl struct { - delegate unsafe.Pointer // (*sdkapi.SyncImpl) - - instrument -} - -type asyncImpl struct { - delegate unsafe.Pointer // (*sdkapi.AsyncImpl) - - instrument - - runner sdkapi.AsyncRunner -} - -var _ metric.MeterProvider = &meterProvider{} -var _ sdkapi.MeterImpl = &meterImpl{} -var _ sdkapi.InstrumentImpl = &syncImpl{} -var _ sdkapi.AsyncImpl = &asyncImpl{} - -func (inst *instrument) Descriptor() sdkapi.Descriptor { - return inst.descriptor -} - -// MeterProvider interface and delegation - -func newMeterProvider() *meterProvider { - return &meterProvider{ - meters: map[meterKey]*meterEntry{}, - } -} - -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.lock.Lock() - defer p.lock.Unlock() - - p.delegate = provider - for key, entry := range p.meters { - entry.impl.setDelegate(key, provider) - } - p.meters = nil -} - -func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - p.lock.Lock() - defer p.lock.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(instrumentationName, opts...) - } - - cfg := metric.NewMeterConfig(opts...) - key := meterKey{ - InstrumentationName: instrumentationName, - InstrumentationVersion: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), - } - entry, ok := p.meters[key] - if !ok { - entry = &meterEntry{} - // Note: This code implements its own MeterProvider - // name-uniqueness logic because there is - // synchronization required at the moment of - // delegation. We use the same instrument-uniqueness - // checking the real SDK uses here: - entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) - p.meters[key] = entry - } - return metric.WrapMeterImpl(entry.unique) -} - -// Meter interface and delegation - -func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { - m.lock.Lock() - defer m.lock.Unlock() - - d := new(sdkapi.MeterImpl) - *d = provider.Meter( - key.InstrumentationName, - metric.WithInstrumentationVersion(key.InstrumentationVersion), - metric.WithSchemaURL(key.SchemaURL), - ).MeterImpl() - m.delegate = unsafe.Pointer(d) - - for _, inst := range m.syncInsts { - inst.setDelegate(*d) - } - m.syncInsts = nil - for _, obs := range m.asyncInsts { - obs.setDelegate(*d) - } - m.asyncInsts = nil -} - -func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewSyncInstrument(desc) - } - - inst := &syncImpl{ - instrument: instrument{ - descriptor: desc, - }, - } - m.syncInsts = append(m.syncInsts, inst) - return inst, nil -} - -// Synchronous delegation - -func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.SyncImpl) - - var err error - *implPtr, err = d.NewSyncInstrument(inst.descriptor) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. 
- panic(err) - } - - atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) -} - -func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return inst -} - -// Async delegation - -func (m *meterImpl) NewAsyncInstrument( - desc sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewAsyncInstrument(desc, runner) - } - - inst := &asyncImpl{ - instrument: instrument{ - descriptor: desc, - }, - runner: runner, - } - m.asyncInsts = append(m.asyncInsts, inst) - return inst, nil -} - -func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return obs -} - -func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.AsyncImpl) - - var err error - *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. - panic(err) - } - - atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) -} - -// Metric updates - -func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { - if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { - (*delegatePtr).RecordBatch(ctx, labels, measurements...) - } -} - -func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { - (*instPtr).RecordOne(ctx, number, labels) - } -} - -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), - "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), - "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), - "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go deleted file mode 100644 index c929bf45c8..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. -type UniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl sdkapi.MeterImpl - state map[string]sdkapi.InstrumentImpl -} - -var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "a metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl -// with the addition of instrument name uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { - return &UniqueInstrumentMeterImpl{ - impl: impl, - state: map[string]sdkapi.InstrumentImpl{}, - } -} - -// MeterImpl gives the caller access to the underlying MeterImpl -// used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { - return u.impl -} - -// RecordBatch implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { - u.impl.RecordBatch(ctx, labels, ms...) -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. -func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { - return fmt.Errorf("metric %s registered as %s %s: %w", - desc.Name(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two sdkapi.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing sdkapi.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { - impl, ok := u.state[descriptor.Name()] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements sdkapi.MeterImpl. 
-func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = asyncInst - return asyncInst, nil -} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index ce7afaa188..e07e794000 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -19,7 +19,7 @@ import ( "unsafe" ) -func BoolToRaw(b bool) uint64 { +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. if b { return 1 } diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 3f722344fa..778ad2d748 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -14,84 +14,38 @@ package metric // import "go.opentelemetry.io/otel/metric" -import ( - "go.opentelemetry.io/otel/metric/unit" -) - -// InstrumentConfig contains options for metric instrument descriptors. -type InstrumentConfig struct { - description string - unit unit.Unit -} - -// Description describes the instrument in human-readable terms. -func (cfg *InstrumentConfig) Description() string { - return cfg.description -} - -// Unit describes the measurement unit for a instrument. -func (cfg *InstrumentConfig) Unit() unit.Unit { - return cfg.unit -} - -// InstrumentOption is an interface for applying metric instrument options. -type InstrumentOption interface { - // ApplyMeter is used to set a InstrumentOption value of a - // InstrumentConfig. - applyInstrument(InstrumentConfig) InstrumentConfig -} - -// NewInstrumentConfig creates a new InstrumentConfig -// and applies all the given options. -func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { - var config InstrumentConfig - for _, o := range opts { - config = o.applyInstrument(config) - } - return config -} - -type instrumentOptionFunc func(InstrumentConfig) InstrumentConfig - -func (fn instrumentOptionFunc) applyInstrument(cfg InstrumentConfig) InstrumentConfig { - return fn(cfg) -} - -// WithDescription applies provided description. -func WithDescription(desc string) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.description = desc - return cfg - }) -} - -// WithUnit applies provided unit. -func WithUnit(unit unit.Unit) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.unit = unit - return cfg - }) -} +import "go.opentelemetry.io/otel/attribute" // MeterConfig contains options for Meters. type MeterConfig struct { instrumentationVersion string schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. } -// InstrumentationVersion is the version of the library providing instrumentation. -func (cfg *MeterConfig) InstrumentationVersion() string { +// InstrumentationVersion returns the version of the library providing +// instrumentation. 
+func (cfg MeterConfig) InstrumentationVersion() string { return cfg.instrumentationVersion } +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + // SchemaURL is the schema_url of the library providing instrumentation. -func (cfg *MeterConfig) SchemaURL() string { +func (cfg MeterConfig) SchemaURL() string { return cfg.schemaURL } // MeterOption is an interface for applying Meter options. type MeterOption interface { - // ApplyMeter is used to set a MeterOption value of a MeterConfig. + // applyMeter is used to set a MeterOption value of a MeterConfig. applyMeter(MeterConfig) MeterConfig } @@ -119,6 +73,16 @@ func WithInstrumentationVersion(version string) MeterOption { }) } +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + // WithSchemaURL sets the schema URL. func WithSchemaURL(schemaURL string) MeterOption { return meterOptionFunc(func(config MeterConfig) MeterConfig { diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 4baf0719fc..bd6f434372 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -19,49 +19,5 @@ OpenTelemetry API. This package is currently in a pre-GA phase. Backwards incompatible changes may be introduced in subsequent minor version releases as we work to track the evolving OpenTelemetry specification and user feedback. - -Measurements can be made about an operation being performed or the state of a -system in general. These measurements can be crucial to the reliable operation -of code and provide valuable insights about the inner workings of a system. - -Measurements are made using instruments provided by this package. The type of -instrument used will depend on the type of measurement being made and of what -part of a system is being measured. - -Instruments are categorized as Synchronous or Asynchronous and independently -as Adding or Grouping. Synchronous instruments are called by the user with a -Context. Asynchronous instruments are called by the SDK during collection. -Adding instruments are semantically intended for capturing a sum. Grouping -instruments are intended for capturing a distribution. - -Adding instruments may be monotonic, in which case they are non-decreasing -and naturally define a rate. - -The synchronous instrument names are: - - Counter: adding, monotonic - UpDownCounter: adding - Histogram: grouping - -and the asynchronous instruments are: - - CounterObserver: adding, monotonic - UpDownCounterObserver: adding - GaugeObserver: grouping - -All instruments are provided with support for either float64 or int64 input -values. - -An instrument is created using a Meter. Additionally, a Meter is used to -record batches of synchronous measurements or asynchronous observations. A -Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to -the instrumentation it instruments and must be named and versioned when -created with a MeterProvider with the name and version of the instrumentation -library. 
- -Instrumentation should be designed to accept a MeterProvider from which it can -create its own unique Meter. Alternatively, the registered global -MeterProvider from the go.opentelemetry.io/otel package can be used as a -default. */ package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go new file mode 100644 index 0000000000..cb0896d38a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/global/global.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/global" + +import ( + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/internal/global" +) + +// Meter returns a Meter from the global MeterProvider. The +// instrumentationName must be the name of the library providing +// instrumentation. This name may be the same as the instrumented code only if +// that code provides built-in instrumentation. If the instrumentationName is +// empty, then a implementation defined default name will be used instead. +// +// This is short for MeterProvider().Meter(name). +func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + return MeterProvider().Meter(instrumentationName, opts...) +} + +// MeterProvider returns the registered global meter provider. +// If none is registered then a No-op MeterProvider is returned. +func MeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers `mp` as the global meter provider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/global/metric.go deleted file mode 100644 index 14ba862002..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/global/metric.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/metric/global" - -import ( - "go.opentelemetry.io/otel/internal/metric/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter creates an implementation of the Meter interface from the global -// MeterProvider. The instrumentationName must be the name of the library -// providing instrumentation. 
This name may be the same as the instrumented -// code only if that code provides built-in instrumentation. If the -// instrumentationName is empty, then a implementation defined default name -// will be used instead. -// -// This is short for MeterProvider().Meter(name) -func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(instrumentationName, opts...) -} - -// GetMeterProvider returns the registered global meter provider. If -// none is registered then a default meter provider is returned that -// forwards the Meter interface to the first registered Meter. -// -// Use the meter provider to create a named meter. E.g. -// meter := global.MeterProvider().Meter("example.com/foo") -// or -// meter := global.Meter("example.com/foo") -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers `mp` as the global meter provider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go new file mode 100644 index 0000000000..0b5d5a99c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observable interface { + Asynchronous + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableCounter interface{ Float64Observable } + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableUpDownCounter interface{ Float64Observable } + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. 
+// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableGauge interface{ Float64Observable } + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observer interface { + Observe(value float64, attributes ...attribute.KeyValue) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observerable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObserverConfig contains options for Asynchronous instruments that +// observe float64 values. +type Float64ObserverConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObserverConfig returns a new Float64ObserverConfig with all opts +// applied. +func NewFloat64ObserverConfig(opts ...Float64ObserverOption) Float64ObserverConfig { + var config Float64ObserverConfig + for _, o := range opts { + config = o.applyFloat64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Float64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64ObserverConfig) Unit() string { + return c.unit +} + +// Callbacks returns the Config callbacks. +func (c Float64ObserverConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObserverOption applies options to float64 Observer instruments. +type Float64ObserverOption interface { + applyFloat64Observer(Float64ObserverConfig) Float64ObserverConfig +} + +type float64ObserverOptionFunc func(Float64ObserverConfig) Float64ObserverConfig + +func (fn float64ObserverOptionFunc) applyFloat64Observer(cfg Float64ObserverConfig) Float64ObserverConfig { + return fn(cfg) +} + +// WithFloat64Callback adds callback to be called for an instrument. +func WithFloat64Callback(callback Float64Callback) Float64ObserverOption { + return float64ObserverOptionFunc(func(cfg Float64ObserverConfig) Float64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go new file mode 100644 index 0000000000..05feeacb05 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observable interface { + Asynchronous + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableCounter interface{ Int64Observable } + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableUpDownCounter interface{ Int64Observable } + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableGauge interface{ Int64Observable } + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observer interface { + Observe(value int64, attributes ...attribute.KeyValue) +} + +// Int64Callback is a function registered with a Meter that makes +// observations for a Int64Observerable instrument it is registered with. +// Calls to the Int64Observer record measurement values for the +// Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callback. Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObserverConfig contains options for Asynchronous instruments that +// observe int64 values. +type Int64ObserverConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObserverConfig returns a new Int64ObserverConfig with all opts +// applied. +func NewInt64ObserverConfig(opts ...Int64ObserverOption) Int64ObserverConfig { + var config Int64ObserverConfig + for _, o := range opts { + config = o.applyInt64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Int64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64ObserverConfig) Unit() string { + return c.unit +} + +// Callbacks returns the Config callbacks. 
+func (c Int64ObserverConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObserverOption applies options to int64 Observer instruments. +type Int64ObserverOption interface { + applyInt64Observer(Int64ObserverConfig) Int64ObserverConfig +} + +type int64ObserverOptionFunc func(Int64ObserverConfig) Int64ObserverConfig + +func (fn int64ObserverOptionFunc) applyInt64Observer(cfg Int64ObserverConfig) Int64ObserverConfig { + return fn(cfg) +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObserverOption { + return int64ObserverOptionFunc(func(cfg Int64ObserverConfig) Int64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go new file mode 100644 index 0000000000..f6dd9e890f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +// Asynchronous instruments are instruments that are updated within a Callback. +// If an instrument is observed outside of it's callback it should be an error. +// +// This interface is used as a grouping mechanism. +type Asynchronous interface { + asynchronous() +} + +// Synchronous instruments are updated in line with application code. +// +// This interface is used as a grouping mechanism. +type Synchronous interface { + synchronous() +} + +// Option applies options to all instruments. +type Option interface { + Float64ObserverOption + Int64ObserverOption + Float64Option + Int64Option +} + +type descOpt string + +func (o descOpt) applyFloat64(c Float64Config) Float64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64(c Int64Config) Int64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. +func WithDescription(desc string) Option { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64(c Float64Config) Float64Config { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64(c Int64Config) Int64Config { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. 
+func WithUnit(u string) Option { return unitOpt(u) } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go new file mode 100644 index 0000000000..2cdfeb2691 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Config contains options for Asynchronous instruments that +// observe float64 values. +type Float64Config struct { + description string + unit string +} + +// Float64Config contains options for Synchronous instruments that record +// float64 values. +func NewFloat64Config(opts ...Float64Option) Float64Config { + var config Float64Config + for _, o := range opts { + config = o.applyFloat64(config) + } + return config +} + +// Description returns the Config description. +func (c Float64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64Config) Unit() string { + return c.unit +} + +// Float64Option applies options to synchronous float64 instruments. +type Float64Option interface { + applyFloat64(Float64Config) Float64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go new file mode 100644 index 0000000000..e212c6d695 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Config contains options for Synchronous instruments that record int64 +// values. +type Int64Config struct { + description string + unit string +} + +// NewInt64Config returns a new Int64Config with all opts +// applied. +func NewInt64Config(opts ...Int64Option) Int64Config { + var config Int64Config + for _, o := range opts { + config = o.applyInt64(config) + } + return config +} + +// Description returns the Config description. +func (c Int64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64Config) Unit() string { + return c.unit +} + +// Int64Option applies options to synchronous int64 instruments. +type Int64Option interface { + applyInt64(Int64Config) Int64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go new file mode 100644 index 0000000000..d1480fa5f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go @@ -0,0 +1,355 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() instrument.Asynchronous +} + +type afCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableCounter +} + +var _ unwrapper = (*afCounter)(nil) +var _ instrument.Float64ObservableCounter = (*afCounter)(nil) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableUpDownCounter +} + +var _ unwrapper = (*afUpDownCounter)(nil) +var _ instrument.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableGauge +} + +var _ unwrapper = (*afGauge)(nil) +var _ instrument.Float64ObservableGauge = (*afGauge)(nil) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableCounter +} + +var _ unwrapper = (*aiCounter)(nil) +var _ instrument.Int64ObservableCounter = (*aiCounter)(nil) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableUpDownCounter +} + +var _ unwrapper = (*aiUpDownCounter)(nil) +var _ instrument.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableGauge +} + +var _ unwrapper = (*aiGauge)(nil) +var _ instrument.Int64ObservableGauge = (*aiGauge)(nil) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Counter + + instrument.Synchronous +} + +var _ instrument.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Counter).Add(ctx, incr, attrs...) + } +} + +type sfUpDownCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64UpDownCounter).Add(ctx, incr, attrs...) + } +} + +type sfHistogram struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Histogram + + instrument.Synchronous +} + +var _ instrument.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Histogram).Record(ctx, x, attrs...) + } +} + +type siCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Counter + + instrument.Synchronous +} + +var _ instrument.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Counter).Add(ctx, x, attrs...) 
+ } +} + +type siUpDownCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64UpDownCounter).Add(ctx, x, attrs...) + } +} + +type siHistogram struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Histogram + + instrument.Synchronous +} + +var _ instrument.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Histogram).Record(ctx, x, attrs...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go new file mode 100644 index 0000000000..8acf632863 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +type il struct { + name string + version string +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) 
+ } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...instrument.Asynchronous) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) 
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	reg := &registration{instruments: insts, function: f}
+	e := m.registry.PushBack(reg)
+	reg.unreg = func() error {
+		m.mtx.Lock()
+		_ = m.registry.Remove(e)
+		m.mtx.Unlock()
+		return nil
+	}
+	return reg, nil
+}
+
+type wrapped interface {
+	unwrap() instrument.Asynchronous
+}
+
+func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
+	out := make([]instrument.Asynchronous, 0, len(instruments))
+
+	for _, inst := range instruments {
+		if in, ok := inst.(wrapped); ok {
+			out = append(out, in.unwrap())
+		} else {
+			out = append(out, inst)
+		}
+	}
+
+	return out
+}
+
+type registration struct {
+	instruments []instrument.Asynchronous
+	function    metric.Callback
+
+	unreg   func() error
+	unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	insts := unwrapInstruments(c.instruments)
+
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(c.function, insts...)
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
similarity index 54%
rename from vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go
rename to vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
index 896c6dd1c3..47c0d787d8 100644
--- a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
@@ -4,7 +4,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//	http://www.apache.org/licenses/LICENSE-2.0
+//	http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,55 +12,57 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package global // import "go.opentelemetry.io/otel/internal/metric/global"
+package global // import "go.opentelemetry.io/otel/metric/internal/global"
 
 import (
+	"errors"
 	"sync"
 	"sync/atomic"
 
+	"go.opentelemetry.io/otel/internal/global"
 	"go.opentelemetry.io/otel/metric"
 )
 
+var (
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateMeterOnce sync.Once
+)
+
 type meterProviderHolder struct {
 	mp metric.MeterProvider
 }
 
-var (
-	globalMeter = defaultMeterValue()
-
-	delegateMeterOnce sync.Once
-)
-
 // MeterProvider is the internal implementation for global.MeterProvider.
 func MeterProvider() metric.MeterProvider {
-	return globalMeter.Load().(meterProviderHolder).mp
+	return globalMeterProvider.Load().(meterProviderHolder).mp
 }
 
 // SetMeterProvider is the internal implementation for global.SetMeterProvider.
 func SetMeterProvider(mp metric.MeterProvider) {
-	delegateMeterOnce.Do(func() {
-		current := MeterProvider()
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+ global.Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to it's current value. No delegate will be configured", + ) + return + } + } - if current == mp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid MeterProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*meterProvider); ok { + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { def.setDelegate(mp) } }) - globalMeter.Store(meterProviderHolder{mp: mp}) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) } -func defaultMeterValue() *atomic.Value { +func defaultMeterProvider() *atomic.Value { v := &atomic.Value{} - v.Store(meterProviderHolder{mp: newMeterProvider()}) + v.Store(meterProviderHolder{mp: &meterProvider{}}) return v } - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalMeter = defaultMeterValue() - delegateMeterOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 0000000000..2f69d2ae54 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or library. +// +// Warning: methods may be added to this interface in minor releases. +type MeterProvider interface { + // Meter creates an instance of a `Meter` interface. The name must be the + // name of the library providing instrumentation. This name may be the same + // as the instrumented code only if that code provides built-in + // instrumentation. If the name is empty, then a implementation defined + // default name will be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: methods may be added to this interface in minor releases. +type Meter interface { + // Int64Counter returns a new instrument identified by name and configured + // with options. The instrument is used to synchronously record increasing + // int64 measurements during a computational operation. + Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) + // Int64UpDownCounter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // int64 measurements during a computational operation. 
+ Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) + // Int64Histogram returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // the distribution of int64 measurements during a computational operation. + Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) + // Int64ObservableCounter returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // increasing int64 measurements once per a measurement collection cycle. + Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new instrument identified by name + // and configured with options. The instrument is used to asynchronously + // record int64 measurements once per a measurement collection cycle. + Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // instantaneous int64 measurements once per a measurement collection + // cycle. + Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) + + // Float64Counter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // increasing float64 measurements during a computational operation. + Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) + // Float64UpDownCounter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // float64 measurements during a computational operation. + Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) + // Float64Histogram returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // the distribution of float64 measurements during a computational + // operation. + Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) + // Float64ObservableCounter returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // increasing float64 measurements once per a measurement collection cycle. + Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new instrument identified by + // name and configured with options. The instrument is used to + // asynchronously record float64 measurements once per a measurement + // collection cycle. + Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // instantaneous float64 measurements once per a measurement collection + // cycle. 
+ Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + RegisterCallback(f Callback, instruments ...instrument.Asynchronous) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurment observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Callbacks. Meaning, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback. +type Observer interface { + // ObserveFloat64 records the float64 value with attributes for obsrv. + ObserveFloat64(obsrv instrument.Float64Observable, value float64, attributes ...attribute.KeyValue) + // ObserveInt64 records the int64 value with attributes for obsrv. + ObserveInt64(obsrv instrument.Int64Observable, value int64, attributes ...attribute.KeyValue) +} + +// Registration is an token representing the unique registration of a callback +// for a set of instruments with a Meter. +type Registration interface { + // Unregister removes the callback registration from a Meter. + // + // This method needs to be idempotent and concurrent safe. + Unregister() error +} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric.go b/vendor/go.opentelemetry.io/otel/metric/metric.go deleted file mode 100644 index d8c5a6b3f3..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// MeterProvider supports named Meter instances. -type MeterProvider interface { - // Meter creates an implementation of the Meter interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. 
If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - Meter(instrumentationName string, opts ...MeterOption) Meter -} - -// Meter is the creator of metric instruments. -// -// An uninitialized Meter is a no-op implementation. -type Meter struct { - impl sdkapi.MeterImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { - return Meter{ - impl: impl, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Measurement = sdkapi.Measurement - -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64GaugeObserver.Observation()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Observation = sdkapi.Observation - -// RecordBatch atomically records a batch of measurements. -func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { - if m.impl == nil { - return - } - m.impl.RecordBatch(ctx, ls, ms...) -} - -// NewBatchObserver creates a new BatchObserver that supports -// making batches of observations for multiple instruments. -func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { - return BatchObserver{ - meter: m, - runner: newBatchAsyncRunner(callback), - } -} - -// NewInt64Counter creates a new integer Counter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { - return wrapInt64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64Counter creates a new floating point Counter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { - return wrapFloat64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { - return wrapInt64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). 
-func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { - return wrapFloat64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64Histogram creates a new integer Histogram instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) { - return wrapInt64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Int64Kind, opts)) -} - -// NewFloat64Histogram creates a new floating point Histogram with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) { - return wrapFloat64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Float64Kind, opts)) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if callback == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if callback == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { - if callback == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { - if callback == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if callback == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if callback == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if b.runner == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if b.runner == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { - if b.runner == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { - if b.runner == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() sdkapi.MeterImpl { - return m.impl -} - -// newAsync constructs one new asynchronous instrument. -func (m Meter) newAsync( - name string, - mkind sdkapi.InstrumentKind, - nkind number.Kind, - opts []InstrumentOption, - runner sdkapi.AsyncRunner, -) ( - sdkapi.AsyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopAsyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) - return m.impl.NewAsyncInstrument(desc, runner) -} - -// newSync constructs one new synchronous instrument. -func (m Meter) newSync( - name string, - metricKind sdkapi.InstrumentKind, - numberKind number.Kind, - opts []InstrumentOption, -) ( - sdkapi.SyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopSyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) 
- desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) - return m.impl.NewSyncInstrument(desc) -} - -// MeterMust is a wrapper for Meter interfaces that panics when any -// instrument constructor encounters an error. -type MeterMust struct { - meter Meter -} - -// BatchObserverMust is a wrapper for BatchObserver that panics when -// any instrument constructor encounters an error. -type BatchObserverMust struct { - batch BatchObserver -} - -// Must constructs a MeterMust implementation from a Meter, allowing -// the application to panic when any instrument constructor yields an -// error. -func Must(meter Meter) MeterMust { - return MeterMust{meter: meter} -} - -// NewInt64Counter calls `Meter.NewInt64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { - if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { - if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { - if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { - if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64Histogram calls `Meter.NewInt64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Histogram(name string, mos ...InstrumentOption) Int64Histogram { - if inst, err := mm.meter.NewInt64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Histogram calls `Meter.NewFloat64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Histogram(name string, mos ...InstrumentOption) Float64Histogram { - if inst, err := mm.meter.NewFloat64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64GaugeObserver calls `Meter.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := mm.meter.NewInt64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `Meter.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := mm.meter.NewFloat64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `Meter.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64CounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := mm.meter.NewInt64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `Meter.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := mm.meter.NewFloat64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `Meter.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := mm.meter.NewInt64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `Meter.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := mm.meter.NewFloat64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewBatchObserver returns a wrapper around BatchObserver that panics -// when any instrument constructor returns an error. -func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { - return BatchObserverMust{ - batch: mm.meter.NewBatchObserver(callback), - } -} - -// NewInt64GaugeObserver calls `BatchObserver.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64GaugeObserver(name string, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := bm.batch.NewInt64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `BatchObserver.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64GaugeObserver(name string, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := bm.batch.NewFloat64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `BatchObserver.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (bm BatchObserverMust) NewInt64CounterObserver(name string, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := bm.batch.NewInt64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `BatchObserver.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64CounterObserver(name string, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := bm.batch.NewFloat64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `BatchObserver.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64UpDownCounterObserver(name string, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := bm.batch.NewInt64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `BatchObserver.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := bm.batch.NewFloat64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go deleted file mode 100644 index 2da24c8f21..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. -var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") - -// Int64ObserverFunc is a type of callback that integral -// observers run. -type Int64ObserverFunc func(context.Context, Int64ObserverResult) - -// Float64ObserverFunc is a type of callback that floating point -// observers run. -type Float64ObserverFunc func(context.Context, Float64ObserverResult) - -// BatchObserverFunc is a callback argument for use with any -// Observer instrument that will be reported as a batch of -// observations. -type BatchObserverFunc func(context.Context, BatchObserverResult) - -// Int64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous integer metric instrument. 
-type Int64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// Float64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous floating point metric instrument. -type Float64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// BatchObserverResult is passed to a batch observer callback to -// capture observations for multiple asynchronous instruments. -type BatchObserverResult struct { - function func([]attribute.KeyValue, ...Observation) -} - -// Observe captures a single integer value from the associated -// instrument callback, with the given labels. -func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { - ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) -} - -// Observe captures a single floating point value from the associated -// instrument callback, with the given labels. -func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { - fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) -} - -// Observe captures a multiple observations from the associated batch -// instrument callback, with the given labels. -func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) { - br.function(labels, obs...) -} - -var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) - -// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { - return &c -} - -// AnyRunner implements AsyncRunner. -func (*Int64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*Float64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*BatchObserverFunc) AnyRunner() {} - -// Run implements AsyncSingleRunner. -func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*i)(ctx, Int64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*f)(ctx, Float64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncBatchRunner. -func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) { - (*b)(ctx, BatchObserverResult{ - function: function, - }) -} - -// wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. 
-func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64GaugeObserver{asyncInstrument: common}, err -} - -// wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. -func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64GaugeObserver{asyncInstrument: common}, err -} - -// wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. -func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64CounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. -func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64CounterObserver{asyncInstrument: common}, err -} - -// wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. -func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64UpDownCounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. -func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64UpDownCounterObserver{asyncInstrument: common}, err -} - -// BatchObserver represents an Observer callback that can report -// observations for multiple instruments. -type BatchObserver struct { - meter Meter - runner sdkapi.AsyncBatchRunner -} - -// Int64GaugeObserver is a metric that captures a set of int64 values at a -// point in time. -type Int64GaugeObserver struct { - asyncInstrument -} - -// Float64GaugeObserver is a metric that captures a set of float64 values -// at a point in time. -type Float64GaugeObserver struct { - asyncInstrument -} - -// Int64CounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64CounterObserver struct { - asyncInstrument -} - -// Float64CounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64CounterObserver struct { - asyncInstrument -} - -// Int64UpDownCounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64UpDownCounterObserver struct { - asyncInstrument -} - -// Float64UpDownCounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64UpDownCounterObserver struct { - asyncInstrument -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64GaugeObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. 
-// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64GaugeObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64CounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64CounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64UpDownCounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64UpDownCounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// syncInstrument contains a SyncImpl. -type syncInstrument struct { - instrument sdkapi.SyncImpl -} - -// asyncInstrument contains a AsyncImpl. -type asyncInstrument struct { - instrument sdkapi.AsyncImpl -} - -// AsyncImpl implements AsyncImpl. -func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl { - return a.instrument -} - -// SyncImpl returns the implementation object for synchronous instruments. -func (s syncInstrument) SyncImpl() sdkapi.SyncImpl { - return s.instrument -} - -func (s syncInstrument) float64Measurement(value float64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value)) -} - -func (s syncInstrument) int64Measurement(value int64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value)) -} - -func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - s.instrument.RecordOne(ctx, number, labels) -} - -// checkNewAsync receives an AsyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - instrument = sdkapi.NewNoopAsyncInstrument() - } - return asyncInstrument{ - instrument: instrument, - }, err -} - -// checkNewSync receives an SyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. 
-func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - // Note: an alternate behavior would be to synthesize a new name - // or group all duplicately-named instruments of a certain type - // together and use a tag for the original name, e.g., - // name = 'invalid.counter.int64' - // label = 'original-name=duplicate-counter-name' - instrument = sdkapi.NewNoopSyncInstrument() - } - return syncInstrument{ - instrument: instrument, - }, err -} - -// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Int64Counter{syncInstrument: common}, err -} - -// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Float64Counter{syncInstrument: common}, err -} - -// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. -func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Int64UpDownCounter{syncInstrument: common}, err -} - -// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. -func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Float64UpDownCounter{syncInstrument: common}, err -} - -// wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram. -func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Int64Histogram{syncInstrument: common}, err -} - -// wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram. -func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Float64Histogram{syncInstrument: common}, err -} - -// Float64Counter is a metric that accumulates float64 values. -type Float64Counter struct { - syncInstrument -} - -// Int64Counter is a metric that accumulates int64 values. -type Int64Counter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Counter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Counter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64UpDownCounter is a metric instrument that sums floating -// point values. 
-type Float64UpDownCounter struct { - syncInstrument -} - -// Int64UpDownCounter is a metric instrument that sums integer values. -type Int64UpDownCounter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64UpDownCounter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64UpDownCounter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64Histogram is a metric that records float64 values. -type Float64Histogram struct { - syncInstrument -} - -// Int64Histogram is a metric that records int64 values. -type Int64Histogram struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Histogram) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Histogram) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Record adds a new value to the list of Histogram's records. The -// labels should contain the keys and values to be associated with -// this value. -func (c Float64Histogram) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Record adds a new value to the Histogram's distribution. The -// labels should contain the keys and values to be associated with -// this value. -func (c Int64Histogram) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go index 37c653f51a..f38619e39a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop.go @@ -14,17 +14,130 @@ package metric // import "go.opentelemetry.io/otel/metric" -type noopMeterProvider struct{} +import ( + "context" -// NewNoopMeterProvider returns an implementation of MeterProvider that -// performs no operations. The Meter and Instrument created from the returned -// MeterProvider also perform no operations. + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. func NewNoopMeterProvider() MeterProvider { return noopMeterProvider{} } -var _ MeterProvider = noopMeterProvider{} +type noopMeterProvider struct{} -func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter { - return Meter{} +func (noopMeterProvider) Meter(string, ...MeterOption) Meter { + return noopMeter{} } + +// NewNoopMeter creates a Meter that does not record any metrics. 
+func NewNoopMeter() Meter { + return noopMeter{} +} + +type noopMeter struct{} + +func (noopMeter) Int64Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableUpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableGauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Float64Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableUpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableGauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +// RegisterCallback creates a register callback that does not record any metrics. 
+func (noopMeter) RegisterCallback(Callback, ...instrument.Asynchronous) (Registration, error) { + return noopReg{}, nil +} + +type noopReg struct{} + +func (noopReg) Unregister() error { return nil } + +type nonrecordingAsyncFloat64Instrument struct { + instrument.Float64Observable +} + +var ( + _ instrument.Float64ObservableCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableUpDownCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableGauge = nonrecordingAsyncFloat64Instrument{} +) + +type nonrecordingAsyncInt64Instrument struct { + instrument.Int64Observable +} + +var ( + _ instrument.Int64ObservableCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableUpDownCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableGauge = nonrecordingAsyncInt64Instrument{} +) + +type nonrecordingSyncFloat64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Float64Counter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64UpDownCounter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64Histogram = nonrecordingSyncFloat64Instrument{} +) + +func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {} +func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {} + +type nonrecordingSyncInt64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Int64Counter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64UpDownCounter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64Histogram = nonrecordingSyncInt64Instrument{} +) + +func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {} +func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea29..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/number.go b/vendor/go.opentelemetry.io/otel/metric/number/number.go deleted file mode 100644 index 3ec95e2014..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/number.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package number // import "go.opentelemetry.io/otel/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. -func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. -func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. -func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. 
-func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. -func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. -func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. -func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. -func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. 
-func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. -func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. -func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. -func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. -func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. 
-func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. This returns: -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. -func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. -func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. 
-func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. -func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go deleted file mode 100644 index 14eb0532e4..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/unit" -) - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: unit, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. 
-func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go deleted file mode 100644 index 64aa5ead12..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // HistogramInstrumentKind indicates a Histogram instrument. - HistogramInstrumentKind InstrumentKind = iota - // GaugeObserverInstrumentKind indicates an GaugeObserver instrument. - GaugeObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // CounterObserverInstrumentKind indicates a CounterObserver instrument. - CounterObserverInstrumentKind - // UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver - // instrument. - UpDownCounterObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, CounterObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go deleted file mode 100644 index 3a2e79d823..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. 
- -package sdkapi - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[HistogramInstrumentKind-0] - _ = x[GaugeObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[CounterObserverInstrumentKind-4] - _ = x[UpDownCounterObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go deleted file mode 100644 index f22895dae6..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -type noopInstrument struct { - descriptor Descriptor -} -type noopSyncInstrument struct{ noopInstrument } -type noopAsyncInstrument struct{ noopInstrument } - -var _ SyncImpl = noopSyncInstrument{} -var _ AsyncImpl = noopAsyncInstrument{} - -// NewNoopSyncInstrument returns a No-op implementation of the -// synchronous instrument interface. -func NewNoopSyncInstrument() SyncImpl { - return noopSyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterInstrumentKind, - }, - }, - } -} - -// NewNoopAsyncInstrument returns a No-op implementation of the -// asynchronous instrument interface. 
-func NewNoopAsyncInstrument() AsyncImpl { - return noopAsyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterObserverInstrumentKind, - }, - }, - } -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (n noopInstrument) Descriptor() Descriptor { - return n.descriptor -} - -func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go deleted file mode 100644 index 36836364bd..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. -type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner is a non-exported method with no functional use - // other than to make this a non-empty interface. 
- AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// NewMeasurement constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - -// Measurement is a low-level type used with synchronous instruments -// as a direct interface to the SDK via `RecordBatch`. -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// NewObservation constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewObservation(instrument AsyncImpl, number number.Number) Observation { - return Observation{ - instrument: instrument, - number: number, - } -} - -// Observation is a low-level type used with asynchronous instruments -// as a direct interface to the SDK via `BatchObserver`. -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 0000000000..6e923acab4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index 6f0016169e..39f025a171 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -12,22 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package instrumentation provides an instrumentation library structure to be -passed to both the OpenTelemetry Tracer and Meter components. - -For more information see -[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). -*/ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -type Library struct { - // Name is the name of the instrumentation library. This should be the - // Go package name of that library. - Name string - // Version is the version of the instrumentation library. - Version string - // SchemaURL of the telemetry emitted by the library. - SchemaURL string -} +// Deprecated: please use Scope instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go similarity index 56% rename from vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go rename to vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 2f17562f0e..09c6d93f6d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -12,13 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. 
+ SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 397fd9593c..5e94b8ae52 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -21,27 +21,72 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -// Environment variable names +// Environment variable names. const ( - // BatchSpanProcessorScheduleDelayKey - // Delay interval between two consecutive exports. - // i.e. 5000 + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" - // BatchSpanProcessorExportTimeoutKey - // Maximum allowed time to export data. - // i.e. 3000 + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" - // BatchSpanProcessorMaxQueueSizeKey - // Maximum queue size - // i.e. 2048 + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" - // BatchSpanProcessorMaxExportBatchSizeKey - // Maximum batch size - // Note: Must be less than or equal to EnvBatchSpanProcessorMaxQueueSize - // i.e. 512 + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // EnvBatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" ) +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. +func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value, ok := os.LookupEnv(key) + if !ok { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + // IntEnvOr returns the int value of the environment variable with name key if // it exists and the value is an int. Otherwise, defaultValue is returned. 
func IntEnvOr(key string, defaultValue int) int { @@ -86,3 +131,47 @@ func BatchSpanProcessorMaxQueueSize(defaultValue int) int { func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) } + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkAttributeCount(defaultValue int) int { + return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index a5eaa7e5d3..c1d220408a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -27,7 +27,7 @@ var ( ErrPartialResource = errors.New("partial resource") ) -// Detector detects OpenTelemetry resource information +// Detector detects OpenTelemetry resource information. type Detector interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. 
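
The env helpers added above (firstInt and the Span* limit accessors) check a span-scoped variable first and fall back to the general one, returning the default when neither is set or when the first value found is not an integer. The standalone sketch below mirrors that precedence for illustration only; lookupLimit is a hypothetical name, since the real helpers live in the internal go.opentelemetry.io/otel/sdk/internal/env package and cannot be imported by outside code.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// lookupLimit mirrors the firstInt fallback: the first key that is set wins,
// an invalid value falls back to the default, and unset keys are skipped.
func lookupLimit(def int, keys ...string) int {
	for _, key := range keys {
		val, ok := os.LookupEnv(key)
		if !ok {
			continue
		}
		n, err := strconv.Atoi(val)
		if err != nil {
			return def // invalid value: keep the default
		}
		return n
	}
	return def
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32")

	// The span-specific key takes precedence over the general key: prints 32.
	fmt.Println(lookupLimit(128,
		"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
		"OTEL_ATTRIBUTE_COUNT_LIMIT"))
}
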
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 701eae40a3..aa0f942f49 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type ( @@ -60,9 +60,9 @@ var ( func (telemetrySDK) Detect(context.Context) (*Resource, error) { return NewWithAttributes( semconv.SchemaURL, - semconv.TelemetrySDKNameKey.String("opentelemetry"), - semconv.TelemetrySDKLanguageKey.String("go"), - semconv.TelemetrySDKVersionKey.String(otel.Version()), + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(otel.Version()), ), nil } @@ -92,7 +92,7 @@ func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil } -// Detect implements Detector +// Detect implements Detector. func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { return StringDetector( semconv.SchemaURL, diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index d80b5ae621..f9a2a29990 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -110,7 +110,16 @@ func WithOSDescription() Option { } // WithProcess adds all the Process attributes to the configured Resource. -// See individual WithProcess* functions to configure specific attributes. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. func WithProcess() Option { return WithDetectors( processPIDDetector{}, @@ -143,7 +152,11 @@ func WithProcessExecutablePath() Option { } // WithProcessCommandArgs adds an attribute with all the command arguments (including -// the command/executable itself) as received by the process the configured Resource. +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. func WithProcessCommandArgs() Option { return WithDetectors(processCommandArgsDetector{}) } @@ -171,3 +184,18 @@ func WithProcessRuntimeVersion() Option { func WithProcessRuntimeDescription() Option { return WithDetectors(processRuntimeDescriptionDetector{}) } + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. 
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 0000000000..318dcf82fe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. +func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. 
+func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 9392296cba..e32843cad1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -17,11 +17,13 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" + "net/url" "os" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) const ( @@ -42,10 +44,10 @@ var ( // builtin. type fromEnv struct{} -// compile time assertion that FromEnv implements Detector interface +// compile time assertion that FromEnv implements Detector interface. var _ Detector = fromEnv{} -// Detect collects resources from environment +// Detect collects resources from environment. func (fromEnv) Detect(context.Context) (*Resource, error) { attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) svcName := strings.TrimSpace(os.Getenv(svcNameKey)) @@ -57,7 +59,7 @@ func (fromEnv) Detect(context.Context) (*Resource, error) { var res *Resource if svcName != "" { - res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) + res = NewSchemaless(semconv.ServiceName(svcName)) } r2, err := constructOTResources(attrs) @@ -88,7 +90,14 @@ func constructOTResources(s string) (*Resource, error) { invalid = append(invalid, p) continue } - k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) + k := strings.TrimSpace(field[0]) + v, err := url.QueryUnescape(strings.TrimSpace(field[1])) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. 
+ v = field[1] + otel.Handle(err) + } attrs = append(attrs, attribute.String(k, v)) } var err error diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 59329770cf..815fe5c204 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -19,7 +19,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type osDescriptionProvider func() (string, error) @@ -63,7 +63,7 @@ func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes( semconv.SchemaURL, - semconv.OSDescriptionKey.String(description), + semconv.OSDescription(description), ), nil } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index 42894a15b5..1c84afc185 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -18,7 +18,6 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( - "bytes" "fmt" "os" @@ -69,23 +68,14 @@ func uname() (string, error) { } return fmt.Sprintf("%s %s %s %s %s", - charsToString(utsName.Sysname[:]), - charsToString(utsName.Nodename[:]), - charsToString(utsName.Release[:]), - charsToString(utsName.Version[:]), - charsToString(utsName.Machine[:]), + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), ), nil } -// charsToString converts a C-like null-terminated char array to a Go string. -func charsToString(charArray []byte) string { - if i := bytes.IndexByte(charArray, 0); i >= 0 { - charArray = charArray[:i] - } - - return string(charArray) -} - // getFirstAvailableFile returns an *os.File of the first available // file from a list of candidate file paths. func getFirstAvailableFile(candidates []string) (*os.File, error) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 80d5e69932..bdd0e7fe68 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -22,7 +22,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type pidProvider func() int @@ -39,7 +39,12 @@ var ( defaultExecutablePathProvider executablePathProvider = os.Executable defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } defaultOwnerProvider ownerProvider = user.Current - defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler } + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } @@ -115,14 +120,14 @@ type processRuntimeDescriptionDetector struct{} // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. 
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. @@ -132,13 +137,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err return nil, err } - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil } // Detect returns a *Resource that describes all the command arguments as received // by the process. func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the @@ -149,18 +154,18 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { return nil, err } - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil } // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. @@ -170,6 +175,6 @@ func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, return NewWithAttributes( semconv.SchemaURL, - semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), + semconv.ProcessRuntimeDescription(runtimeDescription), ), nil } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index e842744ae9..c425ff05db 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -129,6 +129,7 @@ func (r *Resource) Attributes() []attribute.KeyValue { return r.attrs.ToSlice() } +// SchemaURL returns the schema URL associated with Resource r. 
func (r *Resource) SchemaURL() string { if r == nil { return "" @@ -179,13 +180,14 @@ func Merge(a, b *Resource) (*Resource, error) { // Merge the schema URL. var schemaURL string - if a.schemaURL == "" { + switch true { + case a.schemaURL == "": schemaURL = b.schemaURL - } else if b.schemaURL == "" { + case b.schemaURL == "": schemaURL = a.schemaURL - } else if a.schemaURL == b.schemaURL { + case a.schemaURL == b.schemaURL: schemaURL = a.schemaURL - } else { + default: return Empty(), errMergeConflictSchemaURL } @@ -194,7 +196,7 @@ func Merge(a, b *Resource) (*Resource, error) { mi := attribute.NewMergeIterator(b.Set(), a.Set()) combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) for mi.Next() { - combine = append(combine, mi.Label()) + combine = append(combine, mi.Attribute()) } merged := NewWithAttributes(schemaURL, combine...) return merged, nil diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 67e2732c3c..a2d7db4900 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -35,8 +35,11 @@ const ( DefaultMaxExportBatchSize = 512 ) +// BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. type BatchSpanProcessorOptions struct { // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. @@ -181,7 +184,7 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { var err error if bsp.e != nil { flushCh := make(chan struct{}) - if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}, true) { + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { select { case <-flushCh: // Processed any items in queue prior to ForceFlush being called @@ -205,30 +208,43 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { return err } +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. func WithMaxQueueSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxQueueSize = size } } +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxExportBatchSize = size } } +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BatchTimeout = delay } } +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. 
func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.ExportTimeout = timeout } } +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. func WithBlocking() BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BlockOnQueueFull = true @@ -237,7 +253,6 @@ func WithBlocking() BatchSpanProcessorOption { // exportSpans is a subroutine of processing and draining the queue. func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { - bsp.timer.Reset(bsp.o.BatchTimeout) bsp.batchMutex.Lock() @@ -250,7 +265,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. @@ -335,28 +350,35 @@ func (bsp *batchSpanProcessor) drainQueue() { } func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { - bsp.enqueueBlockOnQueueFull(context.TODO(), sd, bsp.o.BlockOnQueueFull) + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } } -func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan, block bool) bool { +func recoverSendOnClosedChan() { + x := recover() + switch err := x.(type) { + case nil: + return + case runtime.Error: + if err.Error() == "send on closed channel" { + return + } + } + panic(x) +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } // This ensures the bsp.queue<- below does not panic as the // processor shuts down. - defer func() { - x := recover() - switch err := x.(type) { - case nil: - return - case runtime.Error: - if err.Error() == "send on closed channel" { - return - } - } - panic(x) - }() + defer recoverSendOnClosedChan() select { case <-bsp.stopCh: @@ -364,13 +386,27 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R default: } - if block { - select { - case bsp.queue <- sd: - return true - case <-ctx.Done(): - return false - } + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: } select { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/config.go b/vendor/go.opentelemetry.io/otel/sdk/trace/config.go deleted file mode 100644 index 61a3043925..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/config.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// SpanLimits represents the limits of a span. -type SpanLimits struct { - // AttributeCountLimit is the maximum allowed span attribute count. - AttributeCountLimit int - - // EventCountLimit is the maximum allowed span event count. - EventCountLimit int - - // LinkCountLimit is the maximum allowed span link count. - LinkCountLimit int - - // AttributePerEventCountLimit is the maximum allowed attribute per span event count. - AttributePerEventCountLimit int - - // AttributePerLinkCountLimit is the maximum allowed attribute per span link count. - AttributePerLinkCountLimit int -} - -func (sl *SpanLimits) ensureDefault() { - if sl.EventCountLimit <= 0 { - sl.EventCountLimit = DefaultEventCountLimit - } - if sl.AttributeCountLimit <= 0 { - sl.AttributeCountLimit = DefaultAttributeCountLimit - } - if sl.LinkCountLimit <= 0 { - sl.LinkCountLimit = DefaultLinkCountLimit - } - if sl.AttributePerEventCountLimit <= 0 { - sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit - } - if sl.AttributePerLinkCountLimit <= 0 { - sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit - } -} - -const ( - // DefaultAttributeCountLimit is the default maximum allowed span attribute count. - DefaultAttributeCountLimit = 128 - - // DefaultEventCountLimit is the default maximum allowed span event count. - DefaultEventCountLimit = 128 - - // DefaultLinkCountLimit is the default maximum allowed span link count. - DefaultLinkCountLimit = 128 - - // DefaultAttributePerEventCountLimit is the default maximum allowed attribute per span event count. - DefaultAttributePerEventCountLimit = 128 - - // DefaultAttributePerLinkCountLimit is the default maximum allowed attribute per span link count. - DefaultAttributePerLinkCountLimit = 128 -) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 8e89e19d4b..d1c86e59b2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -29,7 +29,12 @@ func newEvictedQueue(capacity int) evictedQueue { // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. func (eq *evictedQueue) add(value interface{}) { - if len(eq.queue) == eq.capacity { + if eq.capacity == 0 { + eq.droppedCount++ + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { // Drop first-in while avoiding allocating more capacity to eq.queue. 
copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index c9e2802ac5..bba246041a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -52,7 +52,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - gen.randSource.Read(sid[:]) + _, _ = gen.randSource.Read(sid[:]) return sid } @@ -62,9 +62,9 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - gen.randSource.Read(tid[:]) + _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - gen.randSource.Read(sid[:]) + _, _ = gen.randSource.Read(sid[:]) return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index c6b311f9cd..201c178170 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -31,7 +31,7 @@ const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" ) -// tracerProviderConfig +// tracerProviderConfig. type tracerProviderConfig struct { // processors contains collection of SpanProcessors that are processing pipeline // for spans in the trace signal. @@ -70,10 +70,13 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} { } } +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. type TracerProvider struct { mu sync.Mutex - namedTracer map[instrumentation.Library]*tracer + namedTracer map[instrumentation.Scope]*tracer spanProcessors atomic.Value + isShutdown bool // These fields are not protected by the lock mu. They are assumed to be // immutable after creation of the TracerProvider. @@ -88,15 +91,18 @@ var _ trace.TracerProvider = &TracerProvider{} // NewTracerProvider returns a new and configured TracerProvider. // // By default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler -// - a random number IDGenerator -// - the resource.Default() Resource -// - the default SpanLimits. +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. // // The passed opts are used to override these default values and configure the // returned TracerProvider appropriately. 
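A minimal sketch of the NewTracerProvider defaults described above, assuming span export is configured elsewhere; each option simply replaces one default.

    package main

    import (
        "context"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        ctx := context.Background()

        // All defaults: ParentBased(AlwaysSample), random IDs, resource.Default(),
        // and the default SpanLimits.
        tp := sdktrace.NewTracerProvider()
        defer func() { _ = tp.Shutdown(ctx) }()

        // Overriding a single default, here the sampler.
        sampled := sdktrace.NewTracerProvider(
            sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.1)),
        )
        defer func() { _ = sampled.Shutdown(ctx) }()

        _, span := tp.Tracer("example/defaults").Start(ctx, "operation")
        span.End()
    }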
func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { - o := tracerProviderConfig{} + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) for _, opt := range opts { o = opt.apply(o) @@ -105,18 +111,19 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { o = ensureValidTracerProviderConfig(o) tp := &TracerProvider{ - namedTracer: make(map[instrumentation.Library]*tracer), + namedTracer: make(map[instrumentation.Scope]*tracer), sampler: o.sampler, idGenerator: o.idGenerator, spanLimits: o.spanLimits, resource: o.resource, } - global.Info("TracerProvider created", "config", o) + spss := spanProcessorStates{} for _, sp := range o.processors { - tp.RegisterSpanProcessor(sp) + spss = append(spss, newSpanProcessorState(sp)) } + tp.spanProcessors.Store(spss) return tp } @@ -136,62 +143,62 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T if name == "" { name = defaultTracerName } - il := instrumentation.Library{ + is := instrumentation.Scope{ Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), } - t, ok := p.namedTracer[il] + t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationLibrary: il, + provider: p, + instrumentationScope: is, } - p.namedTracer[il] = t + p.namedTracer[is] = t global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) } return t } -// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors -func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() - new := spanProcessorStates{} - if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { - new = append(new, old...) - } - newSpanSync := &spanProcessorState{ - sp: s, - state: &sync.Once{}, - } - new = append(new, newSpanSync) - p.spanProcessors.Store(new) -} - -// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors -func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { - p.mu.Lock() - defer p.mu.Unlock() - spss := spanProcessorStates{} - old, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok || len(old) == 0 { + if p.isShutdown { return } + newSPS := spanProcessorStates{} + newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...) + newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + if p.isShutdown { + return + } + old := p.spanProcessors.Load().(spanProcessorStates) + if len(old) == 0 { + return + } + spss := spanProcessorStates{} spss = append(spss, old...) 
// stop the span processor if it is started and remove it from the list var stopOnce *spanProcessorState var idx int for i, sps := range spss { - if sps.sp == s { + if sps.sp == sp { stopOnce = sps idx = i } } if stopOnce != nil { stopOnce.state.Do(func() { - if err := s.Shutdown(context.Background()); err != nil { + if err := sp.Shutdown(context.Background()); err != nil { otel.Handle(err) } }) @@ -208,10 +215,7 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { // ForceFlush immediately exports all spans that have not yet been exported for // all the registered span processors. func (p *TracerProvider) ForceFlush(ctx context.Context) error { - spss, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok { - return fmt.Errorf("failed to load span processors") - } + spss := p.spanProcessors.Load().(spanProcessorStates) if len(spss) == 0 { return nil } @@ -230,16 +234,19 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } -// Shutdown shuts down the span processors in the order they were registered. +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. func (p *TracerProvider) Shutdown(ctx context.Context) error { - spss, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok { - return fmt.Errorf("failed to load span processors") - } + spss := p.spanProcessors.Load().(spanProcessorStates) if len(spss) == 0 { return nil } + p.mu.Lock() + defer p.mu.Unlock() + p.isShutdown = true + + var retErr error for _, sps := range spss { select { case <-ctx.Done(): @@ -252,12 +259,19 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { err = sps.sp.Shutdown(ctx) }) if err != nil { - return err + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%v; %v", retErr, err) + } } } - return nil + p.spanProcessors.Store(spanProcessorStates{}) + return retErr } +// TracerProviderOption configures a TracerProvider. type TracerProviderOption interface { apply(tracerProviderConfig) tracerProviderConfig } @@ -333,7 +347,10 @@ func WithIDGenerator(g IDGenerator) TracerProviderOption { // Tracers the TracerProvider creates to make their sampling decisions for the // Spans they create. // -// If this option is not used, the TracerProvider will use a +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a // ParentBased(AlwaysSample) Sampler by default. func WithSampler(s Sampler) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { @@ -344,20 +361,91 @@ func WithSampler(s Sampler) TracerProviderOption { }) } -// WithSpanLimits returns a TracerProviderOption that will configure the -// SpanLimits sl as a TracerProvider's SpanLimits. The configured SpanLimits -// are used used by the Tracers the TracerProvider and the Spans they create -// to limit tracing resources used. +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. // -// If this option is not used, the TracerProvider will use the default -// SpanLimits. 
+// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. func WithSpanLimits(sl SpanLimits) TracerProviderOption { + if sl.AttributeValueLengthLimit <= 0 { + sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { cfg.spanLimits = sl return cfg }) } +// WithRawSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use these limits. These limits bound any Span created by +// a Tracer from the TracerProvider. +// +// The limits will be used as-is. Zero or negative values will not be changed +// to the default value like WithSpanLimits does. Setting a limit to zero will +// effectively disable the related resource it limits and setting to a +// negative value will mean that resource is unlimited. Consequentially, this +// means that the zero-value SpanLimits will disable all span resources. +// Because of this, limits should be constructed using NewSpanLimits and +// updated accordingly. +// +// If this or WithSpanLimits are not provided, the TracerProvider will use the +// limits defined by environment variables, or the defaults if unset. Refer to +// the NewSpanLimits documentation for information about this relationship. +func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = limits + return cfg + }) +} + +func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { + for _, opt := range tracerProviderOptionsFromEnv() { + cfg = opt.apply(cfg) + } + + return cfg +} + +func tracerProviderOptionsFromEnv() []TracerProviderOption { + var opts []TracerProviderOption + + sampler, err := samplerFromEnv() + if err != nil { + otel.Handle(err) + } + + if sampler != nil { + opts = append(opts, WithSampler(sampler)) + } + + return opts +} + // ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. 
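A short sketch of the WithSpanLimits/WithRawSpanLimits distinction above, with illustrative values only: NewSpanLimits starts from the environment-aware defaults, while a raw SpanLimits is taken verbatim, so zero really disables a resource and a negative value really means unlimited.

    package tracingexample

    import sdktrace "go.opentelemetry.io/otel/sdk/trace"

    func limitedProviders() (*sdktrace.TracerProvider, *sdktrace.TracerProvider) {
        // Start from the env/default-derived limits and tighten one field.
        limits := sdktrace.NewSpanLimits()
        limits.AttributeCountLimit = 32

        tuned := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))

        // Raw limits are used as-is: 0 disables the resource, -1 removes the cap.
        strict := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(sdktrace.SpanLimits{
            AttributeValueLengthLimit:   -1,  // never truncate values
            AttributeCountLimit:         -1,  // unlimited span attributes
            EventCountLimit:             0,   // record no events
            LinkCountLimit:              128,
            AttributePerEventCountLimit: 0,
            AttributePerLinkCountLimit:  0,
        }))

        return tuned, strict
    }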
func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { if cfg.sampler == nil { @@ -366,7 +454,6 @@ func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderCon if cfg.idGenerator == nil { cfg.idGenerator = defaultIDGenerator() } - cfg.spanLimits.ensureDefault() if cfg.resource == nil { cfg.resource = resource.Default() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 0000000000..02053b318a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), 
errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index a4ac588f66..5ee9715d27 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -53,17 +53,17 @@ type SamplingParameters struct { // SamplingDecision indicates whether a span is dropped, recorded and/or sampled. type SamplingDecision uint8 -// Valid sampling decisions +// Valid sampling decisions. const ( - // Drop will not record the span and all attributes/events will be dropped + // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set + // *must not* be set. RecordOnly // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set + // *must* be set. RecordAndSample ) @@ -81,7 +81,7 @@ type traceIDRatioSampler struct { func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { psc := trace.SpanContextFromContext(p.ParentContext) - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, @@ -102,6 +102,7 @@ func (ts traceIDRatioSampler) Description() string { // always sample. Fractions < 0 are treated as zero. To respect the // parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used // as a delegate of a `Parent` sampler. +// //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { if fraction >= 1 { @@ -162,10 +163,10 @@ func NeverSample() Sampler { // the root(Sampler) is used to make sampling decision. If the span has // a parent, depending on whether the parent is remote and whether it // is sampled, one of the following samplers will apply: -// - remoteParentSampled(Sampler) (default: AlwaysOn) -// - remoteParentNotSampled(Sampler) (default: AlwaysOff) -// - localParentSampled(Sampler) (default: AlwaysOn) -// - localParentNotSampled(Sampler) (default: AlwaysOff) +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { return parentBased{ root: root, diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index d31d3c1caf..e8530a9593 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -116,7 +116,7 @@ func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { return nil } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. 
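A sketch of the environment-driven sampler configuration added in sampler_env.go above, assuming nothing else sets these variables in the process: NewTracerProvider consults OTEL_TRACES_SAMPLER and OTEL_TRACES_SAMPLER_ARG before applying explicit options, so WithSampler still wins when both are present.

    package main

    import (
        "os"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        // Equivalent to ParentBased(TraceIDRatioBased(0.25)), but configured
        // through the environment.
        _ = os.Setenv("OTEL_TRACES_SAMPLER", "parentbased_traceidratio")
        _ = os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.25")
        fromEnv := sdktrace.NewTracerProvider()

        // An explicit option overrides whatever the environment says.
        explicit := sdktrace.NewTracerProvider(
            sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.NeverSample())),
        )

        _, _ = fromEnv, explicit
    }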
func (ssp *simpleSpanProcessor) MarshalLog() interface{} { return struct { Type string diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 53aac61f5f..0349b2f198 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -26,22 +26,22 @@ import ( // snapshot is an record of a spans state at a particular checkpointed time. // It is used as a read-only representation of that state. type snapshot struct { - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []Event - links []Link - status Status - childSpanCount int - droppedAttributeCount int - droppedEventCount int - droppedLinkCount int - resource *resource.Resource - instrumentationLibrary instrumentation.Library + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []Event + links []Link + status Status + childSpanCount int + droppedAttributeCount int + droppedEventCount int + droppedLinkCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope } var _ ReadOnlySpan = snapshot{} @@ -102,10 +102,16 @@ func (s snapshot) Status() Status { return s.status } +// InstrumentationScope returns information about the instrumentation +// scope that created the span. +func (s snapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} + // InstrumentationLibrary returns information about the instrumentation // library that created the span. func (s snapshot) InstrumentationLibrary() instrumentation.Library { - return s.instrumentationLibrary + return s.instrumentationScope } // Resource returns information about the entity that produced the span. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 779cde691d..9fb483a99f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,15 +20,17 @@ import ( "reflect" "runtime" rt "runtime/trace" + "strings" "sync" "time" + "unicode/utf8" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -63,8 +65,12 @@ type ReadOnlySpan interface { Events() []Event // Status returns the spans status. Status() Status + // InstrumentationScope returns information about the instrumentation + // scope that created the span. + InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library // Resource returns information about the entity that produced the span. 
Resource() *resource.Resource @@ -183,15 +189,18 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { if !s.IsRecording() { return } + s.mu.Lock() + defer s.mu.Unlock() + if s.status.Code > code { + return + } status := Status{Code: code} if code == codes.Error { status.Description = description } - s.mu.Lock() s.status = status - s.mu.Unlock() } // SetAttributes sets attributes of this span. @@ -212,10 +221,17 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { s.mu.Lock() defer s.mu.Unlock() + limit := s.tracer.provider.spanLimits.AttributeCountLimit + if limit == 0 { + // No attributes allowed. + s.droppedAttributes += len(attributes) + return + } + // If adding these attributes could exceed the capacity of s perform a // de-duplication and truncation while adding to avoid over allocation. - if len(s.attributes)+len(attributes) > s.tracer.provider.spanLimits.AttributeCountLimit { - s.addOverCapAttrs(attributes) + if limit > 0 && len(s.attributes)+len(attributes) > limit { + s.addOverCapAttrs(limit, attributes) return } @@ -227,21 +243,25 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { s.droppedAttributes++ continue } + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) } } // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that -// exceed the capacity of s. +// exceed the limit. // // This method assumes s.mu.Lock is held by the caller. // // This method should only be called when there is a possibility that adding -// attrs to s will exceed the capacity of s. Otherwise, attrs should be added -// to s without checking for duplicates and all retrieval methods of the -// attributes for s will de-duplicate as needed. -func (s *recordingSpan) addOverCapAttrs(attrs []attribute.KeyValue) { +// attrs to s will exceed the limit. Otherwise, attrs should be added to s +// without checking for duplicates and all retrieval methods of the attributes +// for s will de-duplicate as needed. +// +// This method assumes limit is a value > 0. The argument should be validated +// by the caller. +func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // In order to not allocate more capacity to s.attributes than needed, // prune and truncate this addition of attributes while adding. @@ -265,17 +285,73 @@ func (s *recordingSpan) addOverCapAttrs(attrs []attribute.KeyValue) { continue } - if len(s.attributes) >= s.tracer.provider.spanLimits.AttributeCountLimit { + if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. s.droppedAttributes++ } else { + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) exists[a.Key] = len(s.attributes) - 1 } } } +// truncateAttr returns a truncated version of attr. Only string and string +// slice attribute values are truncated. String values are truncated to at +// most a length of limit. Each string slice value is truncated in this fashion +// (the slice length itself is unaffected). +// +// No truncation is perfromed for a negative limit. 
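A sketch of the attribute value-length truncation described above, assuming an exporter is registered elsewhere; the limit of 64 is illustrative, only string and string-slice values are affected, and a negative limit leaves them untouched.

    package main

    import (
        "context"
        "strings"

        "go.opentelemetry.io/otel/attribute"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        limits := sdktrace.NewSpanLimits()
        limits.AttributeValueLengthLimit = 64 // bytes, applied per string value

        tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
        _, span := tp.Tracer("example/limits").Start(context.Background(), "op")

        // The recorded value is cut back to at most 64 bytes, ending on a
        // complete UTF-8 rune rather than in the middle of one.
        span.SetAttributes(attribute.String("payload", strings.Repeat("x", 1000)))
        span.End()

        _ = tp.Shutdown(context.Background())
    }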
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr + } + switch attr.Value.Type() { + case attribute.STRING: + if v := attr.Value.AsString(); len(v) > limit { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: + v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } + return attr.Key.StringSlice(v) + } + return attr +} + +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. +func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // @@ -307,14 +383,14 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { defer panic(recovered) opts := []trace.EventOption{ trace.WithAttributes( - semconv.ExceptionTypeKey.String(typeStr(recovered)), - semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)), + semconv.ExceptionType(typeStr(recovered)), + semconv.ExceptionMessage(fmt.Sprint(recovered)), ), } if config.StackTrace() { opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktraceKey.String(recordStackTrace()), + semconv.ExceptionStacktrace(recordStackTrace()), )) } @@ -334,14 +410,13 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() - if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok { - if len(sps) == 0 { - return - } - snap := s.snapshot() - for _, sp := range sps { - sp.sp.OnEnd(snap) - } + sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates) + if len(sps) == 0 { + return + } + snap := s.snapshot() + for _, sp := range sps { + sp.sp.OnEnd(snap) } } @@ -355,14 +430,14 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { } opts = append(opts, trace.WithAttributes( - semconv.ExceptionTypeKey.String(typeStr(err)), - semconv.ExceptionMessageKey.String(err.Error()), + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), )) c := trace.NewEventConfig(opts...) if c.StackTrace() { opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktraceKey.String(recordStackTrace()), + semconv.ExceptionStacktrace(recordStackTrace()), )) } @@ -396,22 +471,23 @@ func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) 
+ e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} - // Discard over limited attributes - attributes := c.Attributes() - var discarded int - if len(attributes) > s.tracer.provider.spanLimits.AttributePerEventCountLimit { - discarded = len(attributes) - s.tracer.provider.spanLimits.AttributePerEventCountLimit - attributes = attributes[:s.tracer.provider.spanLimits.AttributePerEventCountLimit] + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit + if limit == 0 { + // Drop all attributes. + e.DroppedAttributeCount = len(e.Attributes) + e.Attributes = nil + } else if limit > 0 && len(e.Attributes) > limit { + // Drop over capacity. + e.DroppedAttributeCount = len(e.Attributes) - limit + e.Attributes = e.Attributes[:limit] } + s.mu.Lock() - defer s.mu.Unlock() - s.events.add(Event{ - Name: name, - Attributes: attributes, - DroppedAttributeCount: discarded, - Time: c.Timestamp(), - }) + s.events.add(e) + s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than @@ -531,12 +607,20 @@ func (s *recordingSpan) Status() Status { return s.status } +// InstrumentationScope returns the instrumentation.Scope associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { s.mu.Lock() defer s.mu.Unlock() - return s.tracer.instrumentationLibrary + return s.tracer.instrumentationScope } // Resource returns the Resource associated with the Tracer that created this @@ -551,18 +635,23 @@ func (s *recordingSpan) addLink(link trace.Link) { if !s.IsRecording() || !link.SpanContext.IsValid() { return } - s.mu.Lock() - defer s.mu.Unlock() - var droppedAttributeCount int + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} - // Discard over limited attributes - if len(link.Attributes) > s.tracer.provider.spanLimits.AttributePerLinkCountLimit { - droppedAttributeCount = len(link.Attributes) - s.tracer.provider.spanLimits.AttributePerLinkCountLimit - link.Attributes = link.Attributes[:s.tracer.provider.spanLimits.AttributePerLinkCountLimit] + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. 
+ l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] } - s.links.add(Link{link.SpanContext, link.Attributes, droppedAttributeCount}) + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -610,7 +699,7 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { defer s.mu.Unlock() sd.endTime = s.endTime - sd.instrumentationLibrary = s.tracer.instrumentationLibrary + sd.instrumentationScope = s.tracer.instrumentationScope sd.name = s.name sd.parent = s.parent sd.resource = s.tracer.provider.resource diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go new file mode 100644 index 0000000000..aa4d4221db --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "go.opentelemetry.io/otel/sdk/internal/env" + +const ( + // DefaultAttributeValueLengthLimit is the default maximum allowed + // attribute value length, unlimited. + DefaultAttributeValueLengthLimit = -1 + + // DefaultAttributeCountLimit is the default maximum number of attributes + // a span can have. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum number of events a span + // can have. + DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum number of links a span can + // have. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum number of + // attributes a span event can have. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum number of + // attributes a span link can have. + DefaultAttributePerLinkCountLimit = 128 +) + +// SpanLimits represents the limits of a span. +type SpanLimits struct { + // AttributeValueLengthLimit is the maximum allowed attribute value length. + // + // This limit only applies to string and string slice attribute values. + // Any string longer than this value will be truncated to this length. + // + // Setting this to a negative value means no limit is applied. + AttributeValueLengthLimit int + + // AttributeCountLimit is the maximum allowed span attribute count. Any + // attribute added to a span once this limit is reached will be dropped. + // + // Setting this to zero means no attributes will be recorded. + // + // Setting this to a negative value means no limit is applied. + AttributeCountLimit int + + // EventCountLimit is the maximum allowed span event count. Any event + // added to a span once this limit is reached means it will be added but + // the oldest event will be dropped. + // + // Setting this to zero means no events we be recorded. 
+ // + // Setting this to a negative value means no limit is applied. + EventCountLimit int + + // LinkCountLimit is the maximum allowed span link count. Any link added + // to a span once this limit is reached means it will be added but the + // oldest link will be dropped. + // + // Setting this to zero means no links we be recorded. + // + // Setting this to a negative value means no limit is applied. + LinkCountLimit int + + // AttributePerEventCountLimit is the maximum number of attributes allowed + // per span event. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for events. + // + // Setting this to a negative value means no limit is applied. + AttributePerEventCountLimit int + + // AttributePerLinkCountLimit is the maximum number of attributes allowed + // per span link. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for links. + // + // Setting this to a negative value means no limit is applied. + AttributePerLinkCountLimit int +} + +// NewSpanLimits returns a SpanLimits with all limits set to the value their +// corresponding environment variable holds, or the default if unset. +// +// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT +// (default: unlimited) +// +// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) +// +// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) +// +// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: +// 128) +// +// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) +// +// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) +func NewSpanLimits() SpanLimits { + return SpanLimits{ + AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), + AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), + EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), + LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), + AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), + AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go index b649a2ff04..e6ae193521 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -64,4 +64,9 @@ type spanProcessorState struct { sp SpanProcessor state *sync.Once } + +func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { + return &spanProcessorState{sp: sp, state: &sync.Once{}} +} + type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 5b8ab43be3..f17d924b89 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -23,8 +23,8 @@ import ( ) type tracer struct { - provider *TracerProvider - instrumentationLibrary instrumentation.Library + provider *TracerProvider + instrumentationScope instrumentation.Scope } var _ trace.Tracer = &tracer{} @@ -37,6 +37,11 @@ var _ trace.Tracer = &tracer{} func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, 
trace.Span) { config := trace.NewSpanStartConfig(options...) + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + // For local spans created by this SDK, track child span count. if p := trace.SpanFromContext(ctx); p != nil { if sdkSpan, ok := p.(*recordingSpan); ok { @@ -46,7 +51,7 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS s := tr.newSpan(ctx, name, &config) if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { - sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) + sps := tr.provider.spanProcessors.Load().(spanProcessorStates) for _, sp := range sps { sp.sp.OnStart(ctx, rw) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index dcf32c148d..06673a1c04 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -32,6 +32,7 @@ type SpanRecorder struct { var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) +// NewSpanRecorder returns a new initialized SpanRecorder. func NewSpanRecorder() *SpanRecorder { return new(SpanRecorder) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index ece4633c52..bfe73de9c4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/otel/trace" ) +// SpanStubs is a slice of SpanStub use for testing an SDK. type SpanStubs []SpanStub // SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. @@ -95,29 +96,29 @@ func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { DroppedLinks: ro.DroppedLinks(), ChildSpanCount: ro.ChildSpanCount(), Resource: ro.Resource(), - InstrumentationLibrary: ro.InstrumentationLibrary(), + InstrumentationLibrary: ro.InstrumentationScope(), } } // Snapshot returns a read-only copy of the SpanStub. func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { return spanSnapshot{ - name: s.Name, - spanContext: s.SpanContext, - parent: s.Parent, - spanKind: s.SpanKind, - startTime: s.StartTime, - endTime: s.EndTime, - attributes: s.Attributes, - events: s.Events, - links: s.Links, - status: s.Status, - droppedAttributes: s.DroppedAttributes, - droppedEvents: s.DroppedEvents, - droppedLinks: s.DroppedLinks, - childSpanCount: s.ChildSpanCount, - resource: s.Resource, - instrumentationLibrary: s.InstrumentationLibrary, + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationScope: s.InstrumentationLibrary, } } @@ -125,22 +126,22 @@ type spanSnapshot struct { // Embed the interface to implement the private method. 
tracesdk.ReadOnlySpan - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []tracesdk.Event - links []tracesdk.Link - status tracesdk.Status - droppedAttributes int - droppedEvents int - droppedLinks int - childSpanCount int - resource *resource.Resource - instrumentationLibrary instrumentation.Library + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope } func (s spanSnapshot) Name() string { return s.name } @@ -158,6 +159,9 @@ func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } func (s spanSnapshot) Resource() *resource.Resource { return s.resource } -func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { - return s.instrumentationLibrary +func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 0000000000..b580eedeff --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,336 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. 
+type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. +func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. 
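A sketch of how a versioned semconv package might wire the SemanticConventions struct above for the net.* attributes; the key strings and the ip_tcp transport value follow the usual conventions but are illustrative here, only the fields this call touches are filled in, and the package and function names are hypothetical. The internal import path only compiles inside the otel module itself.

    package semconvexample

    import (
        "net/http"

        "go.opentelemetry.io/otel/attribute"
        semconvint "go.opentelemetry.io/otel/semconv/internal"
    )

    // netAttrs derives net.* span attributes from an incoming request over TCP.
    func netAttrs(req *http.Request) []attribute.KeyValue {
        sc := &semconvint.SemanticConventions{
            NetHostIPKey:    attribute.Key("net.host.ip"),
            NetHostNameKey:  attribute.Key("net.host.name"),
            NetHostPortKey:  attribute.Key("net.host.port"),
            NetPeerIPKey:    attribute.Key("net.peer.ip"),
            NetPeerNameKey:  attribute.Key("net.peer.name"),
            NetPeerPortKey:  attribute.Key("net.peer.port"),
            NetTransportTCP: attribute.String("net.transport", "ip_tcp"),
        }
        // "tcp" is the network net/http reports for plain HTTP servers.
        return sc.NetAttributesFromHTTPRequest("tcp", req)
    }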
+func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. 
+func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { + if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { + attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) + } + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + category := code / 100 + if spanKind == trace.SpanKindServer && category == 4 { + return codes.Unset, "" + } + return spanCode, "" +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. 
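A small sketch making the status-code mapping above concrete; classify is a hypothetical helper, and because the package is internal it is normally reached only through the versioned semconv packages, so this compiles only inside the otel module.

    package semconvexample

    import (
        "go.opentelemetry.io/otel/codes"
        semconvint "go.opentelemetry.io/otel/semconv/internal"
        "go.opentelemetry.io/otel/trace"
    )

    // classify returns the span status derived from an HTTP status code.
    func classify(status int, kind trace.SpanKind) codes.Code {
        c, _ := semconvint.SpanStatusFromHTTPStatusCodeAndSpanKind(status, kind)
        return c
    }

    // classify(200, trace.SpanKindClient) == codes.Unset
    // classify(503, trace.SpanKindServer) == codes.Error
    // classify(404, trace.SpanKindServer) == codes.Unset (4xx excluded for servers)
    // classify(404, trace.SpanKindClient) == codes.Error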
+func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go new file mode 100644 index 0000000000..12d6b520f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -0,0 +1,404 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// HTTPConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type HTTPConv struct { + NetConv *NetConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". 
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.proto(req.Proto)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the HTTPConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +func (c *HTTPConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *HTTPConv) proto(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. 
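requiredHTTPPort above reports a port only when it differs from the scheme default (80 for http, 443 for https) and returns -1 otherwise, so default ports never show up as net.peer.port or net.host.port. A standalone restatement of that rule, since the helper itself is unexported:

package main

import "fmt"

// portToReport restates the unexported requiredHTTPPort helper above:
// default ports are elided (-1), anything else is reported as-is.
func portToReport(https bool, port int) int {
	defaultPort := 80
	if https {
		defaultPort = 443
	}
	if port > 0 && port != defaultPort {
		return port
	}
	return -1
}

func main() {
	fmt.Println(portToReport(true, 443))  // -1: default https port, not recorded
	fmt.Println(portToReport(true, 8443)) // 8443: recorded on the span
	fmt.Println(portToReport(false, 80))  // -1: default http port
	fmt.Println(portToReport(false, -1))  // -1: unknown port stays unreported
}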
+func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return stat, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + + if code/100 == 4 { + return codes.Unset, "" + } + return stat, "" +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go new file mode 100644 index 0000000000..4a711133a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -0,0 +1,324 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
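ClientStatus and ServerStatus above differ only in how the 4xx range is treated: a server span is left Unset, a client span is marked Error. The methods hang off the internal HTTPConv type, so this sketch goes through the public httpconv wrappers added further down in this patch to show the mapping for a few sample codes:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
)

func main() {
	for _, code := range []int{200, 404, 500, 604} {
		clientCode, clientMsg := httpconv.ClientStatus(code)
		serverCode, serverMsg := httpconv.ServerStatus(code)
		fmt.Printf("%d: client=%v %q, server=%v %q\n", code, clientCode, clientMsg, serverCode, serverMsg)
	}
	// 200: Unset for both; 404: Error for the client span, Unset for the
	// server span; 500: Error for both; 604: invalid, Error for both with
	// an "Invalid HTTP status code 604" message.
}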
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +// NetConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type NetConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +func (c *NetConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *NetConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. + attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. 
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *NetConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *NetConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
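family above derives the net.sock.family value from the network name and the literal address: unix-domain networks map to "unix", IPv4 literals to "inet", IPv6 literals to "inet6", and anything unparsable yields an empty string so the attribute is skipped. A standalone restatement, since the helper is unexported:

package main

import (
	"fmt"
	"net"
)

// sockFamily restates the unexported family() helper above.
func sockFamily(network, address string) string {
	switch network {
	case "unix", "unixgram", "unixpacket":
		return "unix"
	}
	ip := net.ParseIP(address)
	if ip == nil {
		return "" // not an IP literal: net.sock.family is left unset
	}
	if ip.To4() == nil {
		return "inet6"
	}
	return "inet"
}

func main() {
	fmt.Println(sockFamily("unix", "/run/docker.sock")) // unix
	fmt.Println(sockFamily("tcp", "127.0.0.1"))         // inet
	fmt.Println(sockFamily("tcp", "::1"))               // inet6
	fmt.Println(sockFamily("tcp", "localhost"))         // "" (hostname, not an IP)
}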
+func (c *NetConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(int(p))) + } + return attrs +} + +func (c *NetConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *NetConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go similarity index 60% rename from vendor/go.opentelemetry.io/otel/metric/number/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 0649ff875e..71a1f7748d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/number/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -12,12 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package number // import "go.opentelemetry.io/otel/metric/number" +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 0000000000..679c40c4de --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
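splitHostPort above accepts every host[:port] spelling listed in its comment and reports -1 whenever a usable port is missing. It is unexported, so the sketch below restates the same contract and prints the results for a few representative inputs:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort restates the unexported helper above: host (possibly
// empty) plus port, with -1 meaning "no usable port".
func splitHostPort(hostport string) (string, int) {
	if strings.HasPrefix(hostport, "[") {
		end := strings.LastIndex(hostport, "]")
		if end < 0 {
			return "", -1 // malformed bracketed address
		}
		if !strings.Contains(hostport[end:], ":") {
			return hostport[1:end], -1 // "[host]" without a port
		}
	} else if !strings.Contains(hostport, ":") {
		return hostport, -1 // bare "host" or "host%zone"
	}
	host, p, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", -1
	}
	port, err := strconv.ParseUint(p, 10, 16)
	if err != nil {
		return host, -1
	}
	return host, int(port)
}

func main() {
	fmt.Println(splitHostPort("example.com"))      // host "example.com", port -1
	fmt.Println(splitHostPort("example.com:8080")) // host "example.com", port 8080
	fmt.Println(splitHostPort("[::1]:2376"))       // host "::1", port 2376
	fmt.Println(splitHostPort(":80"))              // empty host, port 80
}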
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. 
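The feature-flag attributes above are intended to be attached to a span event that records a flag evaluation. A hedged sketch follows; the "feature_flag" event name is taken from the semantic-conventions specification rather than from this package, the tracer name is arbitrary, and the attribute values reuse the Examples in the comments above:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func evaluateFlag(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "evaluate-flag")
	defer span.End()

	// Record the evaluation as a span event using the generated helpers.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}

func main() {
	evaluateFlag(context.Background())
}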
+const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. 
+ // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go similarity index 75% rename from vendor/go.opentelemetry.io/otel/metric/unit/unit.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 4615eb16f6..9b8c559de4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -12,13 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package unit // import "go.opentelemetry.io/otel/metric/unit" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" -type Unit string - -// Units defined by OpenTelemetry. const ( - Dimensionless Unit = "1" - Bytes Unit = "By" - Milliseconds Unit = "ms" + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" ) diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go similarity index 65% rename from vendor/go.opentelemetry.io/otel/metric/unit/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index f8e723593e..d5c4b5c136 100644 --- a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package unit provides units. -// -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. -package unit // import "go.opentelemetry.io/otel/metric/unit" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go new file mode 100644 index 0000000000..c60b2a6bb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
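ExceptionEventName and the exception.escaped attribute above are the building blocks for a manually recorded exception event (the trace API's span.RecordError produces a similar event for the type and message attributes). A sketch with an invented error and tracer name; the exception.message key is written out as a plain attribute to stay within what this file defines:

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) (err error) {
	_, span := otel.Tracer("example").Start(ctx, "do-work")
	defer func() {
		if err != nil {
			// The error is still in flight when the span ends, so it is
			// marked as escaped alongside the standard exception event.
			span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
				attribute.String("exception.message", err.Error()),
				semconv.ExceptionEscaped(true),
			))
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()
	}()

	return errors.New("pull failed") // made-up failure
}

func main() {
	_ = doWork(context.Background())
}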
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httpconv provides OpenTelemetry semantic convetions for the net/http +// package from the standard library. +package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal/v2" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +var ( + nc = &internal.NetConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, + } + + hc = &internal.HTTPConv{ + NetConv: nc, + + EnduserIDKey: semconv.EnduserIDKey, + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPFlavorKey: semconv.HTTPFlavorKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + HTTPUserAgentKey: semconv.HTTPUserAgentKey, + } +) + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. It will return the following attributes if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func ClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func ClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func ClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. 
For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func ServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func ServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// RequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func RequestHeader(h http.Header) []attribute.KeyValue { + return hc.RequestHeader(h) +} + +// ResponseHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func ResponseHeader(h http.Header) []attribute.KeyValue { + return hc.ResponseHeader(h) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/netconv/net.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/netconv/net.go new file mode 100644 index 0000000000..d411039584 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/netconv/net.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
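The httpconv wrappers above are the intended public entry points. On the client side, ClientRequest sets the request attributes at span start and ClientResponse plus ClientStatus finish the span once a response arrives; a sketch with a placeholder tracer name and URL:

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

func tracedGet(ctx context.Context, url string) error {
	ctx, span := otel.Tracer("example").Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	span.SetAttributes(httpconv.ClientRequest(req)...)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// As the ClientResponse comment suggests, request and response
	// attributes complement each other on the same span.
	span.SetAttributes(httpconv.ClientResponse(resp)...)
	span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
	return nil
}

func main() {
	_ = tracedGet(context.Background(), "http://127.0.0.1:2375/_ping")
}

On the server side, ServerRequest and ServerStatus slot into ordinary net/http middleware, and, per the warning in the RequestHeader comment, only an explicit allow-list of headers is passed through. The server name, allow-list, and assumed 200 status below are illustrative:

package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

// allowedHeaders is an illustrative allow-list; capturing every header
// risks leaking credentials, as the RequestHeader comment warns.
var allowedHeaders = []string{"Accept", "Content-Type"}

func traced(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx, span := otel.Tracer("example").Start(r.Context(), r.Method,
			trace.WithSpanKind(trace.SpanKindServer),
			trace.WithAttributes(httpconv.ServerRequest("registry.example.com", r)...))
		defer span.End()

		picked := http.Header{}
		for _, k := range allowedHeaders {
			if v, ok := r.Header[k]; ok {
				picked[k] = v
			}
		}
		span.SetAttributes(httpconv.RequestHeader(picked)...)

		next.ServeHTTP(w, r.WithContext(ctx))

		// Real middleware would capture the written status through a
		// ResponseWriter wrapper; 200 is assumed to keep the sketch short.
		span.SetStatus(httpconv.ServerStatus(http.StatusOK))
	})
}

func main() {
	_ = http.ListenAndServe(":8080", traced(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})))
}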
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package netconv provides OpenTelemetry semantic convetions for the net +// package from the standard library. +package netconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/netconv" + +import ( + "net" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/semconv/internal/v2" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +var nc = &internal.NetConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +// Transport returns an attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func Transport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func Client(address string, conn net.Conn) []attribute.KeyValue { + return nc.Client(address, conn) +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func Server(address string, ln net.Listener) []attribute.KeyValue { + return nc.Server(address, ln) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 0000000000..39a2eab3a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
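The netconv wrappers above cover raw network activity rather than HTTP. A sketch of a traced dial; the address and tracer name are placeholders:

package main

import (
	"context"
	"net"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/netconv"
	"go.opentelemetry.io/otel/trace"
)

func dialTraced(ctx context.Context, network, address string) (net.Conn, error) {
	_, span := otel.Tracer("example").Start(ctx, "dial",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	conn, err := net.Dial(network, address)
	if err != nil {
		// Without a connection, Client still derives net.peer.* from the address.
		span.SetAttributes(netconv.Client(address, nil)...)
		return nil, err
	}
	// With a live connection, transport and socket-level attributes are included too.
	span.SetAttributes(netconv.Client(address, conn)...)
	return conn, nil
}

func main() {
	if conn, err := dialTraced(context.Background(), "tcp", "127.0.0.1:2375"); err == nil {
		conn.Close()
	}
}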
+ +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. 
+ BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). 
+ CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + 
CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
+const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). 
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. 
+func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. 
It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. 
+ // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. 
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. 
+func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 0000000000..42fc525d16 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 0000000000..8c4a7299d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3375 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. 
It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It describes more
+ // details regarding the connection.type. It may be the type of cell
+ // technology connection, but it could be used for describing details
+ // about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). 
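+ //
+ // Illustrative usage sketch: assuming this package is imported as
+ // `semconv` and `span` is an active trace.Span (assumptions for the
+ // example only), the code.* constructors defined below can point a span
+ // at the code that produced it, reusing the example values given above:
+ //
+ //	span.SetAttributes(
+ //		semconv.CodeFunction("serveRequest"),
+ //		semconv.CodeNamespace("com.example.MyHTTPService"),
+ //		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+ //		semconv.CodeLineNumber(42),
+ //	)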
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. 
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
+func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). 
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
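+ //
+ // Illustrative usage sketch: assuming this package is imported as
+ // `semconv` and `span` is a client span wrapping a DynamoDB request
+ // (placeholder names, not mandated here), the constructors in this
+ // section can describe the request:
+ //
+ //	span.SetAttributes(
+ //		semconv.AWSDynamoDBTableNames("Users"),
+ //		semconv.AWSDynamoDBConsistentRead(true),
+ //		semconv.AWSDynamoDBLimit(10),
+ //	)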
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. 
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. 
It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. 
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go index 9b430fac0c..945c95a18a 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go @@ -15,16 +15,12 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import ( - "fmt" - "net" "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
@@ -33,163 +29,60 @@ var ( HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, NetTransportUnix) - default: - attrs = append(attrs, NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. 
-func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return + return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{EnduserIDKey.String(username)} - } - return nil + return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - if request.Method != "" { - attrs = append(attrs, HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) - } - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) -} - -func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) -} - -func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, HTTPSchemeHTTPS) - } else { - attrs = append(attrs, HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, HTTPHostKey.String(request.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, HTTPFlavorKey.String(flavor)) - } - - return attrs + return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) 
+ return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the @@ -197,116 +90,25 @@ func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http. // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPMethodKey.String(request.Method), - HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, HTTPRouteKey.String(route)) - } - if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { - if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { - attrs = append(attrs, HTTPClientIPKey.String(addresses[0])) - } - } - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, + return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" + return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// Validates the HTTP status code and returns corresponding span status code. -// If the `code` is not a valid HTTP status code, returns span status Error -// and false. -func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go index ec8b463d98..64b6c46a58 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go @@ -17,4 +17,4 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/v1.7.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index 28b4f5e4d8..caf7249de8 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -31,9 +31,12 @@ func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { // If none is registered then an instance of NoopTracerProvider is returned. // // Use the trace provider to create a named tracer. E.g. -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// // or -// tracer := otel.Tracer("example.com/foo") +// +// tracer := otel.Tracer("example.com/foo") func GetTracerProvider() trace.TracerProvider { return global.TracerProvider() } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index bcc333e04e..cb3efbb9ad 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -25,6 +25,7 @@ type TracerConfig struct { instrumentationVersion string // Schema URL of the telemetry emitted by the Tracer. schemaURL string + attrs attribute.Set } // InstrumentationVersion returns the version of the library providing instrumentation. @@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string { return t.instrumentationVersion } +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. 
func (t *TracerConfig) SchemaURL() string { return t.schemaURL @@ -124,7 +131,7 @@ func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { } // SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created +// only when the span is created. type SpanStartOption interface { applySpanStart(SpanConfig) SpanConfig } @@ -307,6 +314,16 @@ func WithInstrumentationVersion(version string) TracerOption { }) } +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + // WithSchemaURL sets the schema URL for the Tracer. func WithSchemaURL(schemaURL string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 391417718f..ab0346f966 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the OpenTelemetry API. To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: +operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index ad9a9fc5be..73950f2077 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -85,5 +85,5 @@ func (noopSpan) AddEvent(string, ...EventOption) {} // SetName does nothing. func (noopSpan) SetName(string) {} -// TracerProvider returns a no-op TracerProvider +// TracerProvider returns a no-op TracerProvider. func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 0923ceb98d..4aa94f79f4 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -63,7 +63,7 @@ func (t TraceID) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } -// String returns the hex string representation form of a TraceID +// String returns the hex string representation form of a TraceID. func (t TraceID) String() string { return hex.EncodeToString(t[:]) } @@ -86,7 +86,7 @@ func (s SpanID) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } -// String returns the hex string representation form of a SpanID +// String returns the hex string representation form of a SpanID. func (s SpanID) String() string { return hex.EncodeToString(s[:]) } @@ -151,7 +151,7 @@ func decodeHex(h string, b []byte) error { return nil } -// TraceFlags contains flags that can be set on a SpanContext +// TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. // IsSampled returns if the sampling bit is set in the TraceFlags. @@ -160,7 +160,7 @@ func (tf TraceFlags) IsSampled() bool { } // WithSampled sets the sampling bit in a new copy of the TraceFlags. 
-func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. if sampled { return tf | FlagsSampled } @@ -174,7 +174,7 @@ func (tf TraceFlags) MarshalJSON() ([]byte, error) { return json.Marshal(tf.String()) } -// String returns the hex string representation form of TraceFlags +// String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { return hex.EncodeToString([]byte{byte(tf)}[:]) } @@ -364,8 +364,9 @@ type Span interface { SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a - // description, overriding previous values set. The description is only - // included in a status when the code is for an error. + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. SetStatus(code codes.Code, description string) // SetName sets the Span name. @@ -386,16 +387,16 @@ type Span interface { // // For example, a Link is used in the following situations: // -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. type Link struct { // SpanContext of the linked Span. SpanContext SpanContext @@ -503,17 +504,48 @@ type Tracer interface { Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } -// TracerProvider provides access to instrumentation Tracers. +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. 
+// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // // Warning: methods may be added to this interface in minor releases. type TracerProvider interface { - // Tracer creates an implementation of the Tracer interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. // - // This method must be concurrency safe. - Tracer(instrumentationName string, opts ...TracerOption) Tracer + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. 
+ Tracer(name string, options ...TracerOption) Tracer } diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 86fc62c1d8..ca68a82e5f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -21,7 +21,7 @@ import ( "strings" ) -var ( +const ( maxListMembers = 32 listDelimiter = "," @@ -32,10 +32,6 @@ var ( withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) - errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" errInvalidMember errorConst = "invalid tracestate list-member" @@ -43,6 +39,12 @@ var ( errDuplicate errorConst = "duplicate list-member in tracestate" ) +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + type member struct { Key string Value string @@ -68,7 +70,6 @@ func parseMember(m string) (member, error) { Key: matches[1], Value: matches[4], }, nil - } // String encodes member into a string compliant with the W3C Trace Context @@ -171,7 +172,8 @@ func (ts TraceState) Get(key string) string { // specification an error is returned with the original TraceState. // // If adding a new list-member means the TraceState would have more members -// than is allowed an error is returned instead with the original TraceState. +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. func (ts TraceState) Insert(key, value string) (TraceState, error) { m, err := newMember(key, value) if err != nil { @@ -179,17 +181,10 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { } cTS := ts.Delete(key) - if cTS.Len()+1 > maxListMembers { - // TODO (MrAlias): When the second version of the Trace Context - // specification is published this needs to not return an error. - // Instead it should drop the "right-most" member and insert the new - // member at the front. - // - // https://github.com/w3c/trace-context/pull/448 - return ts, fmt.Errorf("failed to insert: %w", errMemberNumber) + if cTS.Len()+1 <= maxListMembers { + cTS.list = append(cTS.list, member{}) } - - cTS.list = append(cTS.list, member{}) + // When the number of members exceeds capacity, drop the "right-most". copy(cTS.list[1:], cTS.list) cTS.list[0] = m diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index a09bcbb5e8..0e8e5e0232 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.4.1" + return "1.14.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 3f06f29934..40df1fae41 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,10 +14,11 @@ module-sets: stable-v1: - version: v1.4.1 + version: v1.14.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/jaeger - go.opentelemetry.io/otel/example/namedtracer @@ -34,27 +35,23 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.27.0 + version: v0.37.0 modules: + - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/internal/metric - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk/export/metric - go.opentelemetry.io/otel/sdk/metric - experimental-schema: - version: v0.0.2 - modules: - - go.opentelemetry.io/otel/schema - bridge: - version: v0.27.1 - modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/view + experimental-schema: + version: v0.0.4 + modules: + - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go deleted file mode 100644 index 07f7e9b1fa..0000000000 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 -// source: opentelemetry/proto/trace/v1/trace_config.proto - -package v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -// Enum value maps for ConstantSampler_ConstantDecision. -var ( - ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", - } - ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, - } -) - -func (x ConstantSampler_ConstantDecision) Enum() *ConstantSampler_ConstantDecision { - p := new(ConstantSampler_ConstantDecision) - *p = x - return p -} - -func (x ConstantSampler_ConstantDecision) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ConstantSampler_ConstantDecision) Descriptor() protoreflect.EnumDescriptor { - return file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0].Descriptor() -} - -func (ConstantSampler_ConstantDecision) Type() protoreflect.EnumType { - return &file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0] -} - -func (x ConstantSampler_ConstantDecision) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ConstantSampler_ConstantDecision.Descriptor instead. -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The global default sampler used to make decisions on span sampling. - // - // Types that are assignable to Sampler: - // *TraceConfig_ConstantSampler - // *TraceConfig_TraceIdRatioBased - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` - // The global default max number of attributes per timed event. - MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - // The global default max number of attributes per span. 
- MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` -} - -func (x *TraceConfig) Reset() { - *x = TraceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceConfig) ProtoMessage() {} - -func (x *TraceConfig) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceConfig.ProtoReflect.Descriptor instead. -func (*TraceConfig) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{0} -} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (x *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := x.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (x *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { - if x, ok := x.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { - return x.TraceIdRatioBased - } - return nil -} - -func (x *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := x.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (x *TraceConfig) GetMaxNumberOfAttributes() int64 { - if x != nil { - return x.MaxNumberOfAttributes - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfTimedEvents() int64 { - if x != nil { - return x.MaxNumberOfTimedEvents - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerTimedEvent - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfLinks() int64 { - if x != nil { - return x.MaxNumberOfLinks - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerLink - } - return 0 -} - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_TraceIdRatioBased struct { - TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -// Sampler that always makes a constant decision on span sampling. 
-type ConstantSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` -} - -func (x *ConstantSampler) Reset() { - *x = ConstantSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConstantSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConstantSampler) ProtoMessage() {} - -func (x *ConstantSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConstantSampler.ProtoReflect.Descriptor instead. -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1} -} - -func (x *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if x != nil { - return x.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to uniformly sample traces with a given ratio. -// The ratio of sampling a trace is equal to that of the specified ratio. -type TraceIdRatioBased struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The desired ratio of sampling. Must be within [0.0, 1.0]. - SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` -} - -func (x *TraceIdRatioBased) Reset() { - *x = TraceIdRatioBased{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceIdRatioBased) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceIdRatioBased) ProtoMessage() {} - -func (x *TraceIdRatioBased) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceIdRatioBased.ProtoReflect.Descriptor instead. -func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2} -} - -func (x *TraceIdRatioBased) GetSamplingRatio() float64 { - if x != nil { - return x.SamplingRatio - } - return 0 -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Rate per second. 
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` -} - -func (x *RateLimitingSampler) Reset() { - *x = RateLimitingSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RateLimitingSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RateLimitingSampler) ProtoMessage() {} - -func (x *RateLimitingSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RateLimitingSampler.ProtoReflect.Descriptor instead. -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3} -} - -func (x *RateLimitingSampler) GetQps() int64 { - if x != nil { - return x.Qps - } - return 0 -} - -var File_opentelemetry_proto_trace_v1_trace_config_proto protoreflect.FileDescriptor - -var file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, - 0x84, 0x05, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x5a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x14, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x00, 0x52, 0x11, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, - 0x67, 0x0a, 0x15, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 
0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, - 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x4f, 0x66, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x54, 0x0a, - 0x28, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x22, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4c, 0x69, 0x6e, - 0x6b, 0x73, 0x12, 0x47, 0x0a, 0x21, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1c, 0x6d, - 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x46, 0x46, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, - 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x22, 0x39, 0x0a, 0x11, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, - 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 
0x74, - 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, - 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, - 0x42, 0x68, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce sync.Once - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc -) - -func file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP() []byte { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce.Do(func() { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData) - }) - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData -} - -var file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = []interface{}{ - (ConstantSampler_ConstantDecision)(0), // 0: opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - (*TraceConfig)(nil), // 1: opentelemetry.proto.trace.v1.TraceConfig - (*ConstantSampler)(nil), // 2: opentelemetry.proto.trace.v1.ConstantSampler - (*TraceIdRatioBased)(nil), // 3: opentelemetry.proto.trace.v1.TraceIdRatioBased - (*RateLimitingSampler)(nil), // 4: opentelemetry.proto.trace.v1.RateLimitingSampler -} -var file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.trace.v1.TraceConfig.constant_sampler:type_name -> opentelemetry.proto.trace.v1.ConstantSampler - 3, // 1: opentelemetry.proto.trace.v1.TraceConfig.trace_id_ratio_based:type_name -> opentelemetry.proto.trace.v1.TraceIdRatioBased - 4, // 2: opentelemetry.proto.trace.v1.TraceConfig.rate_limiting_sampler:type_name -> opentelemetry.proto.trace.v1.RateLimitingSampler - 0, // 3: opentelemetry.proto.trace.v1.ConstantSampler.decision:type_name -> opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_opentelemetry_proto_trace_v1_trace_config_proto_init() } -func file_opentelemetry_proto_trace_v1_trace_config_proto_init() { - if File_opentelemetry_proto_trace_v1_trace_config_proto != nil { - return - } - if 
!protoimpl.UnsafeEnabled { - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConstantSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceIdRatioBased); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitingSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_TraceIdRatioBased)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes, - DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs, - EnumInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes, - MessageInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes, - }.Build() - File_opentelemetry_proto_trace_v1_trace_config_proto = out.File - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = nil -} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go index 402614f1d3..fc285c089e 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v1 "go.opentelemetry.io/proto/otlp/trace/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type ExportTraceServiceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -96,6 +91,23 @@ type ExportTraceServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"` } func (x *ExportTraceServiceResponse) Reset() { @@ -130,6 +142,79 @@ func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1} } +func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess { + if x != nil { + return x.PartialSuccess + } + return nil +} + +type ExportTracePartialSuccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rejected spans. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportTracePartialSuccess) Reset() { + *x = ExportTracePartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTracePartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTracePartialSuccess) ProtoMessage() {} + +func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead. 
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ @@ -149,26 +234,42 @@ var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = [] 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, - 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x01, - 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, - 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x42, 0x73, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, - 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 
0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -183,21 +284,23 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData } -var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest 
(*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - (*v1.ResourceSpans)(nil), // 2: opentelemetry.proto.trace.v1.ResourceSpans + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans } var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans - 0, // 1: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest - 1, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } @@ -230,6 +333,18 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { return nil } } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -237,7 +352,7 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go index 18dff3d03e..d142c2a447 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -13,15 +13,14 @@ import ( "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + 
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,7 +29,6 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage var _ = metadata.Join func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -79,7 +77,7 @@ func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -141,7 +139,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -161,7 +159,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu } var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) ) var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index 4e4c24c0c8..c21f2cb47c 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.17.3 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // TraceServiceClient is the client API for TraceService service. 
@@ -66,7 +71,7 @@ type UnsafeTraceServiceServer interface { } func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) + s.RegisterService(&TraceService_ServiceDesc, srv) } func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -87,7 +92,10 @@ func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -var _TraceService_serviceDesc = grpc.ServiceDesc{ +// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TraceService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", HandlerType: (*TraceServiceServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index de75b592ca..8502e607b2 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/common/v1/common.proto package v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // AnyValue is used to represent any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. @@ -257,6 +252,8 @@ type KeyValueList struct { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -356,21 +353,22 @@ func (x *KeyValue) GetValue() *AnyValue { return nil } -// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version -// of KeyValue that only supports string values. -// -// Deprecated: Do not use. -type StringKeyValue struct { +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +type InstrumentationScope struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // An empty instrumentation scope name means the name is unknown. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } -func (x *StringKeyValue) Reset() { - *x = StringKeyValue{} +func (x *InstrumentationScope) Reset() { + *x = InstrumentationScope{} if protoimpl.UnsafeEnabled { mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -378,13 +376,13 @@ func (x *StringKeyValue) Reset() { } } -func (x *StringKeyValue) String() string { +func (x *InstrumentationScope) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StringKeyValue) ProtoMessage() {} +func (*InstrumentationScope) ProtoMessage() {} -func (x *StringKeyValue) ProtoReflect() protoreflect.Message { +func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -396,83 +394,39 @@ func (x *StringKeyValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringKeyValue.ProtoReflect.Descriptor instead. -func (*StringKeyValue) Descriptor() ([]byte, []int) { +// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. +func (*InstrumentationScope) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} } -func (x *StringKeyValue) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *StringKeyValue) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// InstrumentationLibrary is a message representing the instrumentation library information -// such as the fully qualified name and version. -type InstrumentationLibrary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An empty instrumentation library name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *InstrumentationLibrary) Reset() { - *x = InstrumentationLibrary{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InstrumentationLibrary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InstrumentationLibrary) ProtoMessage() {} - -func (x *InstrumentationLibrary) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InstrumentationLibrary.ProtoReflect.Descriptor instead. 
-func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} -} - -func (x *InstrumentationLibrary) GetName() string { +func (x *InstrumentationScope) GetName() string { if x != nil { return x.Name } return "" } -func (x *InstrumentationLibrary) GetVersion() string { +func (x *InstrumentationScope) GetVersion() string { if x != nil { return x.Version } return "" } +func (x *InstrumentationScope) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -518,22 +472,28 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x3c, 0x0a, 0x0e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x46, 0x0a, 0x16, 0x49, 0x6e, 0x73, - 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x5b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 
0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -548,14 +508,13 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ - (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue - (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue - (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList - (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue - (*StringKeyValue)(nil), // 4: opentelemetry.proto.common.v1.StringKeyValue - (*InstrumentationLibrary)(nil), // 5: opentelemetry.proto.common.v1.InstrumentationLibrary + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -563,11 +522,12 @@ var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> 
opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_opentelemetry_proto_common_v1_common_proto_init() } @@ -625,19 +585,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringKeyValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrary); i { + switch v := v.(*InstrumentationScope); i { case 0: return &v.state case 1: @@ -664,7 +612,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 6, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index ac347acb23..bcc1060e3d 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/resource/v1/resource.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v1 "go.opentelemetry.io/proto/otlp/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,17 +35,15 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Resource information. type Resource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Set of labels that describe the resource. + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. 
@@ -118,14 +115,16 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x61, 0x0a, - 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index abf0b4c168..499a43d77b 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/trace/v1/trace.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v11 "go.opentelemetry.io/proto/otlp/common/v1" v1 "go.opentelemetry.io/proto/otlp/resource/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // SpanKind is the type of span. Can be used to specify additional relationships between spans // in addition to a parent/child relationship. type Span_SpanKind int32 @@ -232,7 +227,7 @@ func (x *TracesData) GetResourceSpans() []*ResourceSpans { return nil } -// A collection of InstrumentationLibrarySpans from a Resource. +// A collection of ScopeSpans from a Resource. 
type ResourceSpans struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -241,11 +236,10 @@ type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // A list of InstrumentationLibrarySpans that originate from a resource. - InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "instrumentation_library_spans" field which have their own - // schema_url field. + // to the data in the "scope_spans" field which have their own schema_url field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -288,9 +282,9 @@ func (x *ResourceSpans) GetResource() *v1.Resource { return nil } -func (x *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { +func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { if x != nil { - return x.InstrumentationLibrarySpans + return x.ScopeSpans } return nil } @@ -302,24 +296,24 @@ func (x *ResourceSpans) GetSchemaUrl() string { return "" } -// A collection of Spans produced by an InstrumentationLibrary. -type InstrumentationLibrarySpans struct { +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The instrumentation library information for the spans in this message. - // Semantically when InstrumentationLibrary isn't set, it is equivalent with - // an empty instrumentation library name (unknown). - InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of Spans that originate from an instrumentation library. + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // This schema_url applies to all spans and span events in the "spans" field. 
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } -func (x *InstrumentationLibrarySpans) Reset() { - *x = InstrumentationLibrarySpans{} +func (x *ScopeSpans) Reset() { + *x = ScopeSpans{} if protoimpl.UnsafeEnabled { mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -327,13 +321,13 @@ func (x *InstrumentationLibrarySpans) Reset() { } } -func (x *InstrumentationLibrarySpans) String() string { +func (x *ScopeSpans) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InstrumentationLibrarySpans) ProtoMessage() {} +func (*ScopeSpans) ProtoMessage() {} -func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { +func (x *ScopeSpans) ProtoReflect() protoreflect.Message { mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -345,39 +339,33 @@ func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InstrumentationLibrarySpans.ProtoReflect.Descriptor instead. -func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { +// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead. +func (*ScopeSpans) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} } -func (x *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { +func (x *ScopeSpans) GetScope() *v11.InstrumentationScope { if x != nil { - return x.InstrumentationLibrary + return x.Scope } return nil } -func (x *InstrumentationLibrarySpans) GetSpans() []*Span { +func (x *ScopeSpans) GetSpans() []*Span { if x != nil { return x.Spans } return nil } -func (x *InstrumentationLibrarySpans) GetSchemaUrl() string { +func (x *ScopeSpans) GetSchemaUrl() string { if x != nil { return x.SchemaUrl } return "" } -// Span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace and form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. +// A Span represents a single operation performed by a single component of the system. // // The next available field id is 17. type Span struct { @@ -449,7 +437,9 @@ type Span struct { // "abc.com/score": 10.239 // // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many @@ -680,6 +670,8 @@ type Span_Event struct { // This field is semantically required to be set to non-empty string. 
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. @@ -763,6 +755,8 @@ type Span_Link struct { // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. @@ -855,137 +849,133 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x7d, 0x0a, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, - 0x73, 0x52, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, - 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe6, 0x01, - 0x0a, 0x1b, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x6e, 0x0a, - 0x17, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x49, 0x0a, 0x0b, 0x73, 0x63, 
0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, + 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, + 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, + 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, + 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, + 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 
0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, + 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x38, 0x0a, - 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, + 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 
0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, + 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, - 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, - 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, - 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, - 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, - 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 
0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, - 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, - 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, - 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 
0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, - 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, - 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, - 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, - 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, - 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, - 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, - 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, - 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, - 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, - 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, - 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, - 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, - 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, - 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x58, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, + 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, + 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, + 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, + 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, + 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, + 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, + 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1003,25 +993,25 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ - (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind - (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode - (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData - (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans - (*InstrumentationLibrarySpans)(nil), // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span - (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status - (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event - (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link - (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource - (*v11.InstrumentationLibrary)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationLibrary - (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue + (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue } var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource - 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - 10, // 3: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary - 5, // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 4, // 2: 
opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans + 10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 5, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event @@ -1068,7 +1058,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrarySpans); i { + switch v := v.(*ScopeSpans); i { case 0: return &v.state case 1: diff --git a/vendor/modules.txt b/vendor/modules.txt index 13106eead1..efb2ec1278 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -24,6 +24,9 @@ code.cloudfoundry.org/clock # github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 ## explicit; go 1.20 github.com/AdaLogics/go-fuzz-headers +# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 +## explicit; go 1.18 +github.com/AdamKorcz/go-118-fuzz-build/testing # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 ## explicit; go 1.16 github.com/Azure/go-ansiterm @@ -38,6 +41,7 @@ github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer +github.com/Microsoft/go-winio/pkg/bindfilter github.com/Microsoft/go-winio/pkg/etw github.com/Microsoft/go-winio/pkg/etwlogrus github.com/Microsoft/go-winio/pkg/fs @@ -239,10 +243,12 @@ github.com/containerd/cgroups/v3/cgroup2/stats # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.6.24 +# github.com/containerd/containerd v1.7.6 ## explicit; go 1.19 github.com/containerd/containerd github.com/containerd/containerd/api/events +github.com/containerd/containerd/api/runtime/sandbox/v1 +github.com/containerd/containerd/api/runtime/task/v2 github.com/containerd/containerd/api/services/containers/v1 github.com/containerd/containerd/api/services/content/v1 github.com/containerd/containerd/api/services/diff/v1 @@ -251,12 +257,16 @@ github.com/containerd/containerd/api/services/images/v1 github.com/containerd/containerd/api/services/introspection/v1 github.com/containerd/containerd/api/services/leases/v1 github.com/containerd/containerd/api/services/namespaces/v1 +github.com/containerd/containerd/api/services/sandbox/v1 github.com/containerd/containerd/api/services/snapshots/v1 +github.com/containerd/containerd/api/services/streaming/v1 github.com/containerd/containerd/api/services/tasks/v1 +github.com/containerd/containerd/api/services/transfer/v1 github.com/containerd/containerd/api/services/ttrpc/events/v1 github.com/containerd/containerd/api/services/version/v1 github.com/containerd/containerd/api/types github.com/containerd/containerd/api/types/task +github.com/containerd/containerd/api/types/transfer github.com/containerd/containerd/archive github.com/containerd/containerd/archive/compression github.com/containerd/containerd/archive/tarheader @@ -269,6 +279,7 @@ 
github.com/containerd/containerd/contrib/nvidia github.com/containerd/containerd/contrib/seccomp/kernelversion github.com/containerd/containerd/defaults github.com/containerd/containerd/diff +github.com/containerd/containerd/diff/proxy github.com/containerd/containerd/diff/walking github.com/containerd/containerd/errdefs github.com/containerd/containerd/events @@ -291,17 +302,28 @@ github.com/containerd/containerd/oci github.com/containerd/containerd/pkg/apparmor github.com/containerd/containerd/pkg/atomicfile github.com/containerd/containerd/pkg/cap +github.com/containerd/containerd/pkg/cleanup github.com/containerd/containerd/pkg/dialer +github.com/containerd/containerd/pkg/epoch github.com/containerd/containerd/pkg/kmutex github.com/containerd/containerd/pkg/randutil github.com/containerd/containerd/pkg/runtimeoptions/v1 github.com/containerd/containerd/pkg/seccomp github.com/containerd/containerd/pkg/shutdown github.com/containerd/containerd/pkg/snapshotters +github.com/containerd/containerd/pkg/streaming +github.com/containerd/containerd/pkg/transfer +github.com/containerd/containerd/pkg/transfer/proxy +github.com/containerd/containerd/pkg/transfer/streaming github.com/containerd/containerd/pkg/ttrpcutil +github.com/containerd/containerd/pkg/unpack github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms github.com/containerd/containerd/plugin +github.com/containerd/containerd/protobuf +github.com/containerd/containerd/protobuf/plugin +github.com/containerd/containerd/protobuf/proto +github.com/containerd/containerd/protobuf/types github.com/containerd/containerd/reference github.com/containerd/containerd/reference/docker github.com/containerd/containerd/remotes @@ -313,7 +335,8 @@ github.com/containerd/containerd/rootfs github.com/containerd/containerd/runtime/linux/runctypes github.com/containerd/containerd/runtime/v2/runc/options github.com/containerd/containerd/runtime/v2/shim -github.com/containerd/containerd/runtime/v2/task +github.com/containerd/containerd/sandbox +github.com/containerd/containerd/sandbox/proxy github.com/containerd/containerd/services github.com/containerd/containerd/services/content/contentserver github.com/containerd/containerd/services/introspection @@ -506,7 +529,6 @@ github.com/golang/gddo/httputil/header github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 -github.com/golang/protobuf/descriptor github.com/golang/protobuf/internal/gengogrpc github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -564,11 +586,11 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway v1.16.0 -## explicit; go 1.14 -github.com/grpc-ecosystem/grpc-gateway/internal -github.com/grpc-ecosystem/grpc-gateway/runtime -github.com/grpc-ecosystem/grpc-gateway/utilities +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 +## explicit; go 1.17 +github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule +github.com/grpc-ecosystem/grpc-gateway/v2/runtime +github.com/grpc-ecosystem/grpc-gateway/v2/utilities # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap @@ -1088,64 +1110,67 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 -## explicit; go 1.16 +# 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 +## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 -## explicit; go 1.16 +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 +## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 -## explicit; go 1.16 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 +## explicit; go 1.18 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -# go.opentelemetry.io/otel v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/exporters/otlp/internal +go.opentelemetry.io/otel/exporters/otlp/internal/envconfig go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal +go.opentelemetry.io/otel/semconv/internal/v2 +go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.17.0/httpconv +go.opentelemetry.io/otel/semconv/v1.17.0/netconv go.opentelemetry.io/otel/semconv/v1.7.0 -# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp -# go.opentelemetry.io/otel/internal/metric v0.27.0 -## explicit; go 1.16 -go.opentelemetry.io/otel/internal/metric/global -go.opentelemetry.io/otel/internal/metric/registry -# go.opentelemetry.io/otel/metric v0.27.0 -## explicit; go 1.16 +# go.opentelemetry.io/otel/metric v0.37.0 +## explicit; go 1.18 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global -go.opentelemetry.io/otel/metric/number -go.opentelemetry.io/otel/metric/sdkapi -go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/sdk v1.4.1 -## explicit; go 1.16 +go.opentelemetry.io/otel/metric/instrument +go.opentelemetry.io/otel/metric/internal/global +# go.opentelemetry.io/otel/sdk v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal 
go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/trace v1.4.1 -## explicit; go 1.16 +# go.opentelemetry.io/otel/trace v1.14.0 +## explicit; go 1.18 go.opentelemetry.io/otel/trace -# go.opentelemetry.io/proto/otlp v0.12.0 +# go.opentelemetry.io/proto/otlp v0.19.0 ## explicit; go 1.14 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 From 8cd5f04ea3c53489482e2112b9fc060aa3efc340 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 13 Jul 2023 16:54:09 -0600 Subject: [PATCH 03/11] daemon/c8d: use new containerd LabelDistributionSource constant Introduced in https://github.com/containerd/containerd/commit/dd3eedf3c35c1213292da7c6ef5e033d2a5b5d4d Signed-off-by: Bjorn Neergaard --- daemon/containerd/image_push.go | 11 ++++------- daemon/containerd/store.go | 3 ++- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index 71b26d0834..f34fc15eb3 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -13,6 +13,7 @@ import ( cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images" + containerdlabels "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" @@ -246,23 +247,19 @@ func getDigestSources(ctx context.Context, store content.Manager, digest digest. sources := extractDistributionSources(info.Labels) if sources == nil { - return nil, errdefs.NotFound(fmt.Errorf("label %q is not attached to %s", labelDistributionSource, digest.String())) + return nil, errdefs.NotFound(fmt.Errorf("label %q is not attached to %s", containerdlabels.LabelDistributionSource, digest.String())) } return sources, nil } -// TODO(vvoland): Remove and use containerd const in containerd 1.7+ -// https://github.com/containerd/containerd/pull/8224 -const labelDistributionSource = "containerd.io/distribution.source." 
- func extractDistributionSources(labels map[string]string) []distributionSource { var sources []distributionSource // Check if this blob has a distributionSource label // if yes, read it as source for k, v := range labels { - if reg := strings.TrimPrefix(k, labelDistributionSource); reg != k { + if reg := strings.TrimPrefix(k, containerdlabels.LabelDistributionSource); reg != k { for _, repo := range strings.Split(v, ",") { ref, err := reference.ParseNamed(reg + "/" + repo) if err != nil { @@ -287,7 +284,7 @@ type distributionSource struct { func (source distributionSource) ToAnnotation() (string, string) { domain := reference.Domain(source.registryRef) v := reference.Path(source.registryRef) - return labelDistributionSource + domain, v + return containerdlabels.LabelDistributionSource + domain, v } func (source distributionSource) GetReference(dgst digest.Digest) (reference.Named, error) { diff --git a/daemon/containerd/store.go b/daemon/containerd/store.go index 44650502c4..90e56bdf82 100644 --- a/daemon/containerd/store.go +++ b/daemon/containerd/store.go @@ -5,6 +5,7 @@ import ( "github.com/containerd/containerd/content" cerrdefs "github.com/containerd/containerd/errdefs" + containerdlabels "github.com/containerd/containerd/labels" "github.com/distribution/reference" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -42,7 +43,7 @@ func (p fakeStoreWithSources) Info(ctx context.Context, dgst digest.Digest) (con return info, err } - key := labelDistributionSource + reference.Domain(source.registryRef) + key := containerdlabels.LabelDistributionSource + reference.Domain(source.registryRef) value := reference.Path(source.registryRef) return content.Info{ Digest: dgst, From c217e3c87a7d5efe981618a2504b470f62c8b141 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Mon, 24 Jul 2023 06:34:22 -0600 Subject: [PATCH 04/11] vendor: github.com/moby/buildkit v0.12.2 The following changes were required: * integration/build: progressui's signature changed in https://github.com/moby/buildkit/commit/6b8fbed01e7b857e53d5378b04152b583fae670e * builder-next: flightcontrol.Group has become a generic type in https://github.com/moby/buildkit/commit/8ffc03b8f0426f6426cb25db744dc8976de0dccf * builder-next/executor: add github.com/moby/buildkit/executor/resources types, necessitated by https://github.com/moby/buildkit/commit/6e87e4b455cdbf5454bb1db2da859a4d586c828b * builder-next: stub util/network/Namespace.Sample(), necessitated by https://github.com/moby/buildkit/commit/963f16179f04c9061a8ecdb9c5cc813b99e1e423 Co-authored-by: CrazyMax Co-authored-by: Sebastiaan van Stijn Signed-off-by: Bjorn Neergaard --- .../adapters/containerimage/pull.go | 16 +- builder/builder-next/executor_linux.go | 12 + builder/builder-next/executor_nolinux.go | 5 +- integration/build/build_traces_test.go | 2 +- vendor.mod | 8 +- vendor.sum | 24 +- .../anchore/go-struct-converter/.bouncer.yaml | 10 + .../anchore/go-struct-converter/.gitignore | 30 + .../go-struct-converter/.golangci.yaml | 78 ++ .../go-struct-converter/CONTRIBUTING.md | 86 ++ .../go-struct-converter}/LICENSE | 18 +- .../anchore/go-struct-converter/Makefile | 81 ++ .../anchore/go-struct-converter/README.md | 166 +++ .../anchore/go-struct-converter/chain.go | 95 ++ .../anchore/go-struct-converter/converter.go | 334 ++++++ .../pkg/converter/constant.go | 1 - .../pkg/converter/convert_unix.go | 654 ++++++++--- .../pkg/converter/cs_proxy_unix.go | 168 +++ .../pkg/converter/tool/builder.go | 167 ++- 
.../pkg/converter/tool/feature.go | 113 ++ .../nydus-snapshotter/pkg/converter/types.go | 103 +- .../nydus-snapshotter/pkg/converter/utils.go | 20 +- .../nydus-snapshotter/pkg/errdefs/errors.go | 17 +- .../nydus-snapshotter/pkg/label/label.go | 64 + .../github.com/containerd/typeurl/.gitignore | 2 - .../github.com/containerd/typeurl/README.md | 20 - vendor/github.com/containerd/typeurl/doc.go | 83 -- vendor/github.com/containerd/typeurl/types.go | 214 ---- .../github.com/moby/buildkit/cache/blobs.go | 63 +- .../moby/buildkit/cache/compression_nydus.go | 20 +- .../buildkit/cache/contenthash/checksum.go | 2 + .../moby/buildkit/cache/filelist.go | 9 +- .../github.com/moby/buildkit/cache/manager.go | 12 +- .../moby/buildkit/cache/metadata.go | 2 +- .../moby/buildkit/cache/metadata/metadata.go | 7 +- vendor/github.com/moby/buildkit/cache/refs.go | 122 +- .../github.com/moby/buildkit/cache/remote.go | 14 +- .../moby/buildkit/cache/remotecache/export.go | 167 ++- .../buildkit/cache/remotecache/gha/gha.go | 6 +- .../moby/buildkit/cache/remotecache/import.go | 54 +- .../cache/remotecache/inline/inline.go | 4 +- .../buildkit/cache/remotecache/local/local.go | 59 +- .../cache/remotecache/registry/registry.go | 117 +- .../cache/remotecache/v1/cachestorage.go | 2 +- .../buildkit/cache/remotecache/v1/chains.go | 6 +- .../buildkit/cache/remotecache/v1/utils.go | 12 +- .../moby/buildkit/cache/util/fsutil.go | 32 +- .../github.com/moby/buildkit/client/client.go | 194 ++- .../moby/buildkit/client/llb/async.go | 4 +- .../moby/buildkit/client/llb/definition.go | 24 +- .../moby/buildkit/client/llb/diff.go | 4 +- .../moby/buildkit/client/llb/exec.go | 7 +- .../moby/buildkit/client/llb/fileop.go | 61 +- .../client/llb/imagemetaresolver/resolver.go | 14 +- .../moby/buildkit/client/llb/merge.go | 27 +- .../moby/buildkit/client/llb/meta.go | 27 +- .../moby/buildkit/client/llb/resolver.go | 5 +- .../moby/buildkit/client/llb/source.go | 28 +- .../moby/buildkit/client/llb/sourcemap.go | 18 +- .../moby/buildkit/client/llb/state.go | 65 ++ .../moby/buildkit/client/ociindex/ociindex.go | 47 +- .../github.com/moby/buildkit/client/solve.go | 2 +- .../buildkit/cmd/buildkitd/config/config.go | 15 +- .../buildkit/cmd/buildkitd/config/gcpolicy.go | 87 +- .../cmd/buildkitd/config/gcpolicy_unix.go | 15 +- .../cmd/buildkitd/config/gcpolicy_windows.go | 8 +- .../moby/buildkit/control/control.go | 75 +- .../executor/containerdexecutor/executor.go | 37 +- .../moby/buildkit/executor/executor.go | 3 +- .../moby/buildkit/executor/oci/hosts.go | 4 +- .../moby/buildkit/executor/oci/resolvconf.go | 62 +- .../moby/buildkit/executor/oci/spec.go | 21 +- .../moby/buildkit/executor/oci/spec_unix.go | 33 + .../buildkit/executor/oci/spec_windows.go | 24 + .../moby/buildkit/executor/resources/cpu.go | 141 +++ .../moby/buildkit/executor/resources/io.go | 117 ++ .../buildkit/executor/resources/memory.go | 159 +++ .../buildkit/executor/resources/monitor.go | 287 +++++ .../executor/resources/monitor_linux.go | 15 + .../executor/resources/monitor_nolinux.go | 8 + .../moby/buildkit/executor/resources/pids.go | 45 + .../buildkit/executor/resources/sampler.go | 139 +++ .../moby/buildkit/executor/resources/sys.go | 9 + .../buildkit/executor/resources/sys_linux.go | 93 ++ .../executor/resources/sys_nolinux.go | 9 + .../executor/resources/types/systypes.go | 72 ++ .../executor/resources/types/types.go | 104 ++ .../executor/runcexecutor/executor.go | 365 ++++-- .../executor/runcexecutor/executor_common.go | 52 +- .../executor/runcexecutor/executor_linux.go | 
62 +- .../moby/buildkit/executor/stubs.go | 13 +- .../exporter/containerimage/attestations.go | 10 +- .../exporter/containerimage/export.go | 160 +-- .../exporter/containerimage/exptypes/keys.go | 75 ++ .../exporter/containerimage/exptypes/types.go | 3 - .../containerimage/image/docker_image.go | 8 +- .../buildkit/exporter/containerimage/opts.go | 67 +- .../exporter/containerimage/writer.go | 124 +- .../moby/buildkit/exporter/exptypes/keys.go | 15 + .../moby/buildkit/exporter/local/export.go | 70 +- .../moby/buildkit/exporter/local/fs.go | 44 + .../moby/buildkit/exporter/oci/export.go | 51 +- .../moby/buildkit/exporter/tar/export.go | 31 +- .../buildkit/exporter/util/epoch/parse.go | 8 +- .../frontend/attestations/sbom/sbom.go | 14 +- .../frontend/dockerfile/builder/build.go | 1037 ++--------------- .../dockerfile/builder/subrequests.go | 54 - .../dockerfile/dockerfile2llb/convert.go | 280 +++-- .../dockerfile2llb/convert_addchecksum.go | 6 - .../dockerfile2llb/convert_addgit.go | 6 - .../dockerfile2llb/convert_noaddchecksum.go | 6 - .../dockerfile2llb/convert_noaddgit.go | 6 - .../dockerfile2llb/convert_runmount.go | 2 +- .../dockerfile/dockerfile2llb/image.go | 10 +- .../frontend/dockerfile/instructions/bflag.go | 25 +- .../instructions/commands_runmount.go | 129 +- .../instructions/commands_runnetwork.go | 12 +- .../instructions/commands_secrets.go | 5 - .../dockerfile/instructions/commands_ssh.go | 5 - .../frontend/dockerfile/instructions/parse.go | 14 +- .../frontend/dockerfile/parser/parser.go | 3 +- .../dockerfile/shell/equal_env_unix.go | 4 +- .../dockerfile/shell/equal_env_windows.go | 6 +- .../buildkit/frontend/dockerfile/shell/lex.go | 81 +- .../moby/buildkit/frontend/dockerui/attr.go | 138 +++ .../moby/buildkit/frontend/dockerui/build.go | 114 ++ .../moby/buildkit/frontend/dockerui/config.go | 496 ++++++++ .../buildkit/frontend/dockerui/context.go | 194 +++ .../frontend/dockerui/namedcontext.go | 253 ++++ .../buildkit/frontend/dockerui/requests.go | 91 ++ .../moby/buildkit/frontend/frontend.go | 2 +- .../frontend/gateway/client/client.go | 4 +- .../gateway/{ => container}/container.go | 60 +- .../frontend/gateway/{ => container}/util.go | 2 +- .../frontend/gateway/forwarder/forward.go | 43 +- .../frontend/gateway/forwarder/frontend.go | 2 +- .../moby/buildkit/frontend/gateway/gateway.go | 89 +- .../frontend/gateway/grpcclient/client.go | 44 +- .../moby/buildkit/frontend/gateway/pb/caps.go | 11 + .../moby/buildkit/frontend/gateway/pb/exit.go | 2 +- .../frontend/gateway/pb/gateway.pb.go | 557 ++++++--- .../frontend/gateway/pb/gateway.proto | 13 +- .../buildkit/session/filesync/filesync.go | 65 +- .../github.com/moby/buildkit/session/grpc.go | 2 +- .../moby/buildkit/session/sshforward/copy.go | 6 +- .../buildkit/snapshot/containerd/content.go | 97 +- .../moby/buildkit/snapshot/diffapply_unix.go | 12 +- .../buildkit/snapshot/localmounter_windows.go | 67 +- .../moby/buildkit/snapshot/snapshotter.go | 2 + .../moby/buildkit/solver/cachemanager.go | 15 +- .../moby/buildkit/solver/cachestorage.go | 2 +- .../moby/buildkit/solver/combinedcache.go | 4 +- .../github.com/moby/buildkit/solver/edge.go | 4 +- .../buildkit/solver/errdefs/fronetendcap.go | 2 +- .../moby/buildkit/solver/errdefs/solve.go | 2 +- .../buildkit/solver/errdefs/subrequest.go | 2 +- .../moby/buildkit/solver/errdefs/vertex.go | 2 +- .../github.com/moby/buildkit/solver/jobs.go | 49 +- .../moby/buildkit/solver/llbsolver/bridge.go | 43 +- .../buildkit/solver/llbsolver/file/backend.go | 51 +- 
.../moby/buildkit/solver/llbsolver/history.go | 315 ++++- .../buildkit/solver/llbsolver/mounts/mount.go | 43 +- .../buildkit/solver/llbsolver/ops/exec.go | 31 +- .../buildkit/solver/llbsolver/ops/file.go | 36 +- .../solver/llbsolver/proc/provenance.go | 5 +- .../buildkit/solver/llbsolver/proc/sbom.go | 13 +- .../buildkit/solver/llbsolver/provenance.go | 205 +++- .../llbsolver/provenance/buildconfig.go | 167 +-- .../solver/llbsolver/provenance/capture.go | 43 +- .../solver/llbsolver/provenance/predicate.go | 38 +- .../moby/buildkit/solver/llbsolver/solver.go | 83 +- .../buildkit/solver/memorycachestorage.go | 2 +- .../moby/buildkit/solver/pb/caps.go | 4 +- .../moby/buildkit/solver/pb/ops.pb.go | 376 +++--- .../moby/buildkit/solver/pb/ops.proto | 1 + .../buildkit/solver/result/attestation.go | 8 +- .../moby/buildkit/solver/result/result.go | 33 +- .../github.com/moby/buildkit/solver/types.go | 2 +- .../buildkit/source/containerimage/pull.go | 48 +- .../moby/buildkit/source/git/gitsource.go | 3 +- .../buildkit/source/git/gitsource_unix.go | 81 +- .../buildkit/source/git/gitsource_windows.go | 2 +- .../moby/buildkit/source/http/httpsource.go | 10 + .../moby/buildkit/sourcepolicy/engine.go | 17 +- .../moby/buildkit/util/archutil/Dockerfile | 2 +- .../moby/buildkit/util/archutil/detect.go | 8 +- .../moby/buildkit/util/attestation/types.go | 2 - .../moby/buildkit/util/bklog/log.go | 13 + .../moby/buildkit/util/buildinfo/buildinfo.go | 452 ------- .../buildkit/util/buildinfo/types/types.go | 55 - .../moby/buildkit/util/compression/attrs.go | 48 + .../buildkit/util/compression/compression.go | 39 +- .../moby/buildkit/util/compression/zstd.go | 2 +- .../moby/buildkit/util/contentutil/copy.go | 2 + .../moby/buildkit/util/contentutil/refs.go | 2 - .../moby/buildkit/util/contentutil/types.go | 15 + .../entitlements/security/security_linux.go | 12 +- .../util/flightcontrol/flightcontrol.go | 53 +- .../buildkit/util/grpcerrors/grpcerrors.go | 13 +- .../buildkit/util/grpcerrors/intercept.go | 6 +- .../moby/buildkit/util/imageutil/buildinfo.go | 34 - .../moby/buildkit/util/imageutil/config.go | 74 +- .../moby/buildkit/util/leaseutil/manager.go | 26 +- .../util/network/cniprovider/allowempty.s | 0 .../buildkit/util/network/cniprovider/cni.go | 73 +- .../util/network/cniprovider/cni_linux.go | 70 ++ .../util/network/cniprovider/cni_nolinux.go | 12 + .../util/network/cniprovider/cni_unsafe.go | 17 - .../network/cniprovider/createns_linux.go | 79 +- .../moby/buildkit/util/network/host.go | 4 + .../util/network/netproviders/network_unix.go | 4 +- .../network/netproviders/network_windows.go | 4 +- .../moby/buildkit/util/network/network.go | 13 + .../moby/buildkit/util/network/none.go | 4 + .../moby/buildkit/util/overlay/overlay.go | 8 + .../buildkit/util/overlay/overlay_linux.go | 13 +- .../buildkit/util/progress/multiwriter.go | 2 +- .../util/progress/progressui/colors.go | 18 +- .../util/progress/progressui/display.go | 41 +- .../util/progress/progressui/printer.go | 11 +- .../moby/buildkit/util/pull/pull.go | 18 +- .../util/pull/pullprogress/progress.go | 17 +- .../moby/buildkit/util/purl/image.go | 4 +- .../moby/buildkit/util/push/push.go | 23 +- .../moby/buildkit/util/resolver/authorizer.go | 13 +- .../buildkit/util/resolver/limited/group.go | 4 +- .../moby/buildkit/util/resolver/pool.go | 9 +- .../moby/buildkit/util/resolver/resolver.go | 28 +- .../moby/buildkit/util/resolver/utils.go | 22 + .../moby/buildkit/util/stack/stack.go | 2 +- .../moby/buildkit/util/stack/stack.pb.go | 2 +- 
.../moby/buildkit/util/system/path.go | 206 ++++ .../moby/buildkit/util/system/path_unix.go | 10 - .../moby/buildkit/util/system/path_windows.go | 35 - .../buildkit/util/tracing/detect/detect.go | 8 +- .../util/tracing/detect/threadsafe.go | 26 - .../moby/buildkit/util/tracing/tracing.go | 2 +- .../util/tracing/transform/instrumentation.go | 12 +- .../buildkit/util/tracing/transform/span.go | 13 +- .../moby/buildkit/util/winlayers/applier.go | 6 + .../moby/buildkit/util/winlayers/differ.go | 12 +- vendor/github.com/moby/buildkit/version/ua.go | 49 + .../moby/buildkit/version/version.go | 30 +- .../moby/buildkit/worker/base/worker.go | 20 +- .../moby/buildkit/worker/cacheresult.go | 4 +- .../buildkit/worker/containerd/containerd.go | 4 +- .../github.com/moby/buildkit/worker/worker.go | 10 +- .../spdx/tools-golang/convert/chain.go | 39 + .../spdx/tools-golang/convert/struct.go | 50 + .../spdx/tools-golang/json/parser.go | 48 - .../spdx/tools-golang/json/reader.go | 83 ++ .../spdx/tools-golang/json/writer.go | 44 +- .../spdx/tools-golang/spdx/common/types.go | 6 + .../spdx/tools-golang/spdx/model.go | 133 +++ .../spdx/{ => v2}/common/annotation.go | 0 .../spdx/{ => v2}/common/checksum.go | 2 +- .../spdx/{ => v2}/common/creation_info.go | 0 .../spdx/{ => v2}/common/external.go | 7 + .../spdx/{ => v2}/common/identifier.go | 0 .../spdx/{ => v2}/common/package.go | 0 .../spdx/{ => v2}/common/snippet.go | 0 .../tools-golang/spdx/v2/v2_1/annotation.go | 31 + .../spdx/v2/v2_1/creation_info.go | 28 + .../spdx/{v2_2 => v2/v2_1}/document.go | 38 +- .../spdx/tools-golang/spdx/v2/v2_1/file.go | 92 ++ .../spdx/v2/v2_1/other_license.go | 31 + .../spdx/tools-golang/spdx/v2/v2_1/package.go | 122 ++ .../tools-golang/spdx/v2/v2_1/relationship.go | 25 + .../spdx/tools-golang/spdx/v2/v2_1/review.go | 25 + .../spdx/tools-golang/spdx/v2/v2_1/snippet.go | 46 + .../spdx/{ => v2}/v2_2/annotation.go | 4 +- .../spdx/{ => v2}/v2_2/creation_info.go | 6 +- .../tools-golang/spdx/v2/v2_2/document.go | 150 +++ .../tools-golang/spdx/{ => v2}/v2_2/file.go | 4 +- .../spdx/{ => v2}/v2_2/other_license.go | 0 .../spdx/{ => v2}/v2_2/package.go | 78 +- .../spdx/{ => v2}/v2_2/relationship.go | 4 +- .../tools-golang/spdx/{ => v2}/v2_2/review.go | 0 .../spdx/{ => v2}/v2_2/snippet.go | 4 +- .../spdx/{ => v2}/v2_3/annotation.go | 6 +- .../spdx/{ => v2}/v2_3/creation_info.go | 9 +- .../tools-golang/spdx/v2/v2_3/document.go | 150 +++ .../tools-golang/spdx/{ => v2}/v2_3/file.go | 8 +- .../spdx/{ => v2}/v2_3/other_license.go | 3 +- .../spdx/{ => v2}/v2_3/package.go | 84 +- .../spdx/{ => v2}/v2_3/relationship.go | 7 +- .../tools-golang/spdx/{ => v2}/v2_3/review.go | 2 +- .../spdx/{ => v2}/v2_3/snippet.go | 6 +- .../spdx/tools-golang/spdx/v2_3/document.go | 65 -- vendor/modules.txt | 32 +- 290 files changed, 10730 insertions(+), 4943 deletions(-) create mode 100644 vendor/github.com/anchore/go-struct-converter/.bouncer.yaml create mode 100644 vendor/github.com/anchore/go-struct-converter/.gitignore create mode 100644 vendor/github.com/anchore/go-struct-converter/.golangci.yaml create mode 100644 vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md rename vendor/github.com/{containerd/typeurl => anchore/go-struct-converter}/LICENSE (93%) create mode 100644 vendor/github.com/anchore/go-struct-converter/Makefile create mode 100644 vendor/github.com/anchore/go-struct-converter/README.md create mode 100644 vendor/github.com/anchore/go-struct-converter/chain.go create mode 100644 vendor/github.com/anchore/go-struct-converter/converter.go 
create mode 100644 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go create mode 100644 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go create mode 100644 vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go delete mode 100644 vendor/github.com/containerd/typeurl/.gitignore delete mode 100644 vendor/github.com/containerd/typeurl/README.md delete mode 100644 vendor/github.com/containerd/typeurl/doc.go delete mode 100644 vendor/github.com/containerd/typeurl/types.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/cpu.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/io.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/memory.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/monitor.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/pids.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/sampler.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/sys.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/sys_linux.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/types/systypes.go create mode 100644 vendor/github.com/moby/buildkit/executor/resources/types/types.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go create mode 100644 vendor/github.com/moby/buildkit/exporter/exptypes/keys.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/attr.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/build.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/config.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/context.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerui/requests.go rename vendor/github.com/moby/buildkit/frontend/gateway/{ => container}/container.go (87%) rename vendor/github.com/moby/buildkit/frontend/gateway/{ => container}/util.go (96%) delete mode 100644 vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go delete mode 100644 vendor/github.com/moby/buildkit/util/buildinfo/types/types.go create mode 100644 vendor/github.com/moby/buildkit/util/compression/attrs.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/types.go delete mode 100644 vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go 
delete mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go delete mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go create mode 100644 vendor/github.com/moby/buildkit/util/overlay/overlay.go create mode 100644 vendor/github.com/moby/buildkit/util/resolver/utils.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/path_unix.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/path_windows.go delete mode 100644 vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go create mode 100644 vendor/github.com/moby/buildkit/version/ua.go create mode 100644 vendor/github.com/spdx/tools-golang/convert/chain.go create mode 100644 vendor/github.com/spdx/tools-golang/convert/struct.go delete mode 100644 vendor/github.com/spdx/tools-golang/json/parser.go create mode 100644 vendor/github.com/spdx/tools-golang/json/reader.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/common/types.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/model.go rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/annotation.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/checksum.go (91%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/creation_info.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/external.go (93%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/identifier.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/package.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/common/snippet.go (100%) create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go rename vendor/github.com/spdx/tools-golang/spdx/{v2_2 => v2/v2_1}/document.go (69%) create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/annotation.go (94%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/creation_info.go (84%) create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/file.go (98%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/other_license.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/package.go (73%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/relationship.go (92%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/review.go (100%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_2/snippet.go (96%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/annotation.go (88%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/creation_info.go (80%) create mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go rename 
vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/file.go (94%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/other_license.go (90%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/package.go (75%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/relationship.go (81%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/review.go (89%) rename vendor/github.com/spdx/tools-golang/spdx/{ => v2}/v2_3/snippet.go (92%) delete mode 100644 vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index 890261ce39..55394948ea 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -63,7 +63,7 @@ type SourceOpt struct { // Source is the source implementation for accessing container images type Source struct { SourceOpt - g flightcontrol.Group + g flightcontrol.Group[interface{}] } // NewSource creates a new image source @@ -187,7 +187,7 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session type puller struct { is *Source resolveLocalOnce sync.Once - g flightcontrol.Group + g flightcontrol.Group[struct{}] src *source.ImageIdentifier desc ocispec.Descriptor ref string @@ -258,7 +258,7 @@ func (p *puller) resolveLocal() { } func (p *puller) resolve(ctx context.Context, g session.Group) error { - _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String()) defer func() { resolveProgressDone(err) @@ -266,13 +266,13 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String()) if err != nil { - return nil, err + return struct{}{}, err } if p.desc.Digest == "" && p.config == nil { origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String()) if err != nil { - return nil, err + return struct{}{}, err } p.desc = desc @@ -287,16 +287,16 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest { ref, err := distreference.WithDigest(ref, p.desc.Digest) if err != nil { - return nil, err + return struct{}{}, err } _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) if err != nil { - return nil, err + return struct{}{}, err } p.config = dt } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/builder/builder-next/executor_linux.go b/builder/builder-next/executor_linux.go index b672d718bd..bee2371220 100644 --- a/builder/builder-next/executor_linux.go +++ b/builder/builder-next/executor_linux.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/executor/runcexecutor" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" @@ -49,6 +50,11 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi pidmap = nil } + rm, err := resources.NewMonitor() + if err != nil { + return nil, err + } + return runcexecutor.New(runcexecutor.Opt{ Root: 
filepath.Join(root, "executor"), CommandCandidates: []string{"runc"}, @@ -58,6 +64,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi IdentityMapping: pidmap, DNS: dnsConfig, ApparmorProfile: apparmorProfile, + ResourceMonitor: rm, }, networkProviders) } @@ -119,6 +126,11 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network) iface.ep = ep } +// TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint. +func (iface *lnInterface) Sample() (*network.Sample, error) { + return &network.Sample{}, nil +} + func (iface *lnInterface) Set(s *specs.Spec) error { <-iface.ready if iface.err != nil { diff --git a/builder/builder-next/executor_nolinux.go b/builder/builder-next/executor_nolinux.go index 7275822da2..5c4ee6a72b 100644 --- a/builder/builder-next/executor_nolinux.go +++ b/builder/builder-next/executor_nolinux.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + resourcetypes "github.com/moby/buildkit/executor/resources/types" ) func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) { @@ -20,8 +21,8 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool type stubExecutor struct{} -func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { - return errors.New("buildkit executor not implemented for "+runtime.GOOS) +func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) { + return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS) } func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error { diff --git a/integration/build/build_traces_test.go b/integration/build/build_traces_test.go index 1f047a98d8..1cbc02f987 100644 --- a/integration/build/build_traces_test.go +++ b/integration/build/build_traces_test.go @@ -57,7 +57,7 @@ func TestBuildkitHistoryTracePropagation(t *testing.T) { }() eg.Go(func() error { - _, err := progressui.DisplaySolveStatus(ctxGo, "test", nil, &testWriter{t}, ch) + _, err := progressui.DisplaySolveStatus(ctxGo, nil, &testWriter{t}, ch, progressui.WithPhase("test")) return err }) diff --git a/vendor.mod b/vendor.mod index a178966d8e..dfab3a75a1 100644 --- a/vendor.mod +++ b/vendor.mod @@ -61,7 +61,7 @@ require ( github.com/miekg/dns v1.1.43 github.com/mistifyio/go-zfs/v3 v3.0.1 github.com/mitchellh/copystructure v1.2.0 - github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 // v0.11 branch + github.com/moby/buildkit v0.12.2 github.com/moby/ipvs v1.1.0 github.com/moby/locker v1.0.1 github.com/moby/patternmatcher v0.6.0 @@ -116,6 +116,7 @@ require ( cloud.google.com/go/longrunning v0.4.1 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect @@ -134,10 +135,9 @@ require ( github.com/containerd/console 
v1.0.3 // indirect github.com/containerd/go-cni v1.1.9 // indirect github.com/containerd/go-runc v1.1.0 // indirect - github.com/containerd/nydus-snapshotter v0.3.1 // indirect + github.com/containerd/nydus-snapshotter v0.8.2 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/containerd/ttrpc v1.2.2 // indirect - github.com/containerd/typeurl v1.0.2 // indirect github.com/containernetworking/cni v1.1.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -177,7 +177,7 @@ require ( github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect + github.com/spdx/tools-golang v0.5.1 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tinylib/msgp v1.1.8 // indirect github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect diff --git a/vendor.sum b/vendor.sum index 44cf660fd8..53eb4aafc1 100644 --- a/vendor.sum +++ b/vendor.sum @@ -117,8 +117,10 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrftOD5CrVCUfoYGHs4X4VViTuGOXA8WloCjTY0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -155,6 +157,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= +github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -326,8 +330,8 @@ github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHr github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod 
h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA= github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= -github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= -github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM= +github.com/containerd/nydus-snapshotter v0.8.2 h1:7SOrMU2YmLzfbsr5J7liMZJlNi5WT6vtIOxLGv+iz7E= +github.com/containerd/nydus-snapshotter v0.8.2/go.mod h1:UJILTN5LVBRY+dt8BGJbp72Xy729hUZsOugObEI3/O8= github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= @@ -337,8 +341,6 @@ github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtO github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= @@ -626,7 +628,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -906,8 +907,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= -github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 h1:LSh03Csyx/zQq8MreC9MYMQE/+5EkohwZMvXSS6kMZo= -github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54/go.mod h1:bMQDryngJKGvJ/ZuRFhrejurbvYSv3NkGCheQ59X4AM= +github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc= +github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/locker v1.0.1 
h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -1159,8 +1160,8 @@ github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34c github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= -github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ= -github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM= +github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE= +github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1201,7 +1202,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1951,6 +1953,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0= +kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml b/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml new file mode 100644 index 0000000000..db50b4d30f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml @@ -0,0 +1,10 @@ +permit: + - BSD.* + - CC0.* + - MIT.* + - Apache.* + - MPL.* + - ISC + - WTFPL + +ignore-packages: diff --git a/vendor/github.com/anchore/go-struct-converter/.gitignore b/vendor/github.com/anchore/go-struct-converter/.gitignore new file mode 100644 index 0000000000..1edd832da1 --- /dev/null +++ 
b/vendor/github.com/anchore/go-struct-converter/.gitignore @@ -0,0 +1,30 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# tools +.tmp + +# test output +test/results + +# IDE project files +.idea diff --git a/vendor/github.com/anchore/go-struct-converter/.golangci.yaml b/vendor/github.com/anchore/go-struct-converter/.golangci.yaml new file mode 100644 index 0000000000..fdb37721db --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/.golangci.yaml @@ -0,0 +1,78 @@ +#issues: +# # The list of ids of default excludes to include or disable. +# include: +# - EXC0002 # disable excluding of issues about comments from golint + +linters: + # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - asciicheck + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - exportloopref + - funlen + - gocognit + - goconst + - gocritic + - gocyclo + - gofmt + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - whitespace + +# do not enable... +# - gochecknoglobals +# - gochecknoinits # this is too aggressive +# - rowserrcheck disabled per generics https://github.com/golangci/golangci-lint/issues/2649 +# - godot +# - godox +# - goerr113 +# - goimports # we're using gosimports now instead to account for extra whitespaces (see https://github.com/golang/go/issues/20818) +# - golint # deprecated +# - gomnd # this is too aggressive +# - interfacer # this is a good idea, but is no longer supported and is prone to false positives +# - lll # without a way to specify per-line exception cases, this is not usable +# - maligned # this is an excellent linter, but tricky to optimize and we are not sensitive to memory layout optimizations +# - nestif +# - prealloc # following this rule isn't consistently a good idea, as it sometimes forces unnecessary allocations that result in less idiomatic code +# - scopelint # deprecated +# - testpackage +# - wsl # this doens't have an auto-fixer yet and is pretty noisy (https://github.com/bombsimon/wsl/issues/90) + +linters-settings: + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 140 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 100 + + gocognit: + # Minimal code complexity to report + # Default: 30 (but we recommend 10-20) + min-complexity: 80 + + gocyclo: + # Minimal code complexity to report. 
+ # Default: 30 (but we recommend 10-20) + min-complexity: 50 diff --git a/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md b/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md new file mode 100644 index 0000000000..9ff2670b2f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md @@ -0,0 +1,86 @@ +# Contributing to go-struct-converter + +If you are looking to contribute to this project and want to open a GitHub pull request ("PR"), there are a few guidelines of what we are looking for in patches. Make sure you go through this document and ensure that your code proposal is aligned. + +## Sign off your work + +The `sign-off` is an added line at the end of the explanation for the commit, certifying that you wrote it or otherwise have the right to submit it as an open-source patch. By submitting a contribution, you agree to be bound by the terms of the DCO Version 1.1 and Apache License Version 2.0. + +Signing off a commit certifies the below Developer's Certificate of Origin (DCO): + +```text +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +All contributions to this project are licensed under the [Apache License Version 2.0, January 2004](http://www.apache.org/licenses/). + +When committing your change, you can add the required line manually so that it looks like this: + +```text +Signed-off-by: John Doe +``` + +Alternatively, configure your Git client with your name and email to use the `-s` flag when creating a commit: + +```text +$ git config --global user.name "John Doe" +$ git config --global user.email "john.doe@example.com" +``` + +Creating a signed-off commit is then possible with `-s` or `--signoff`: + +```text +$ git commit -s -m "this is a commit message" +``` + +To double-check that the commit was signed-off, look at the log output: + +```text +$ git log -1 +commit 37ceh170e4hb283bb73d958f2036ee5k07e7fde7 (HEAD -> issue-35, origin/main, main) +Author: John Doe +Date: Mon Aug 1 11:27:13 2020 -0400 + + this is a commit message + + Signed-off-by: John Doe +``` + +[//]: # "TODO: Commit guidelines, granular commits" +[//]: # "TODO: Commit guidelines, descriptive messages" +[//]: # "TODO: Commit guidelines, commit title, extra body description" +[//]: # "TODO: PR title and description" + +## Test your changes + +Ensure that your changes have passed the test suite. + +Simply run `make test` to have all tests run and validate changes work properly. 
+ +## Document your changes + +When proposed changes are modifying user-facing functionality or output, it is expected the PR will include updates to the documentation as well. diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/anchore/go-struct-converter/LICENSE similarity index 93% rename from vendor/github.com/containerd/typeurl/LICENSE rename to vendor/github.com/anchore/go-struct-converter/LICENSE index 584149b6ee..261eeb9e9f 100644 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ b/vendor/github.com/anchore/go-struct-converter/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/anchore/go-struct-converter/Makefile b/vendor/github.com/anchore/go-struct-converter/Makefile new file mode 100644 index 0000000000..f5412aef5c --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/Makefile @@ -0,0 +1,81 @@ +TEMPDIR = ./.tmp + +# commands and versions +LINTCMD = $(TEMPDIR)/golangci-lint run --tests=false --timeout=5m --config .golangci.yaml +GOIMPORTS_CMD = $(TEMPDIR)/gosimports -local github.com/anchore + +# tool versions +GOLANGCILINT_VERSION = v1.50.1 +GOSIMPORTS_VERSION = v0.3.4 +BOUNCER_VERSION = v0.4.0 + +# formatting variables +BOLD := $(shell tput -T linux bold) +PURPLE := $(shell tput -T linux setaf 5) +GREEN := $(shell tput -T linux setaf 2) +CYAN := $(shell tput -T linux setaf 6) +RED := $(shell tput -T linux setaf 1) +RESET := $(shell tput -T linux sgr0) +TITLE := $(BOLD)$(PURPLE) +SUCCESS := $(BOLD)$(GREEN) + +# test variables +RESULTSDIR = test/results +COVER_REPORT = $(RESULTSDIR)/unit-coverage-details.txt +COVER_TOTAL = $(RESULTSDIR)/unit-coverage-summary.txt +# the quality gate lower threshold for unit test total % coverage (by function statements) +COVERAGE_THRESHOLD := 80 + +$(RESULTSDIR): + mkdir -p $(RESULTSDIR) + +$(TEMPDIR): + mkdir -p $(TEMPDIR) + +.PHONY: bootstrap-tools +bootstrap-tools: $(TEMPDIR) + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TEMPDIR)/ $(GOLANGCILINT_VERSION) + curl -sSfL https://raw.githubusercontent.com/wagoodman/go-bouncer/master/bouncer.sh | sh -s -- -b $(TEMPDIR)/ $(BOUNCER_VERSION) + # the only difference between goimports and gosimports is that gosimports removes extra whitespace between import blocks (see 
https://github.com/golang/go/issues/20818) + GOBIN="$(realpath $(TEMPDIR))" go install github.com/rinchsan/gosimports/cmd/gosimports@$(GOSIMPORTS_VERSION) + +.PHONY: static-analysis +static-analysis: check-licenses lint + +.PHONY: lint +lint: ## Run gofmt + golangci lint checks + $(call title,Running linters) + # ensure there are no go fmt differences + @printf "files with gofmt issues: [$(shell gofmt -l -s .)]\n" + @test -z "$(shell gofmt -l -s .)" + + # run all golangci-lint rules + $(LINTCMD) + @[ -z "$(shell $(GOIMPORTS_CMD) -d .)" ] || (echo "goimports needs to be fixed" && false) + + # go tooling does not play well with certain filename characters, ensure the common cases don't result in future "go get" failures + $(eval MALFORMED_FILENAMES := $(shell find . | grep -e ':')) + @bash -c "[[ '$(MALFORMED_FILENAMES)' == '' ]] || (printf '\nfound unsupported filename characters:\n$(MALFORMED_FILENAMES)\n\n' && false)" + +.PHONY: lint-fix +lint-fix: ## Auto-format all source code + run golangci lint fixers + $(call title,Running lint fixers) + gofmt -w -s . + $(GOIMPORTS_CMD) -w . + $(LINTCMD) --fix + go mod tidy + +.PHONY: check-licenses +check-licenses: ## Ensure transitive dependencies are compliant with the current license policy + $(TEMPDIR)/bouncer check ./... + +.PHONY: unit +unit: $(RESULTSDIR) ## Run unit tests (with coverage) + $(call title,Running unit tests) + go test -coverprofile $(COVER_REPORT) $(shell go list ./... | grep -v anchore/syft/test) + @go tool cover -func $(COVER_REPORT) | grep total | awk '{print substr($$3, 1, length($$3)-1)}' > $(COVER_TOTAL) + @echo "Coverage: $$(cat $(COVER_TOTAL))" + @if [ $$(echo "$$(cat $(COVER_TOTAL)) >= $(COVERAGE_THRESHOLD)" | bc -l) -ne 1 ]; then echo "$(RED)$(BOLD)Failed coverage quality gate (> $(COVERAGE_THRESHOLD)%)$(RESET)" && false; fi + +.PHONY: test +test: unit diff --git a/vendor/github.com/anchore/go-struct-converter/README.md b/vendor/github.com/anchore/go-struct-converter/README.md new file mode 100644 index 0000000000..06d8e4311e --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/README.md @@ -0,0 +1,166 @@ +# Go `struct` Converter + +A library for converting between Go structs. + +```go +chain := converter.NewChain(V1{}, V2{}, V3{}) + +chain.Convert(myV1struct, &myV3struct) +``` + +## Details + +At its core, this library provides a `Convert` function, which automatically +handles converting fields with the same name, and "convertable" +types. Some examples are: +* `string` -> `string` +* `string` -> `*string` +* `int` -> `string` +* `string` -> `[]string` + +The automatic conversions are implemented when there is an obvious way +to convert between the types. A lot more automatic conversions happen +-- see [the converter tests](converter_test.go) for a more comprehensive +list of what is currently supported. + +Not everything can be handled automatically, however, so there is also +a `ConvertFrom` interface any struct in the graph can implement to +perform custom conversion, similar to how the stdlib `MarshalJSON` and +`UnmarshalJSON` would be implemented. + +Additionally, and maybe most importantly, there is a `converter.Chain` available, +which orchestrates conversions between _multiple versions_ of structs. This could +be thought of similar to database migrations: given a starting struct and a target +struct, the `chain.Convert` function iterates through every intermediary migration +in order to arrive at the target struct. 
+ +## Basic Usage + +To illustrate usage we'll start with a few basic structs, some of which +implement the `ConvertFrom` interface due to breaking changes: + +```go +// --------- V1 struct definition below --------- + +type V1 struct { + Name string + OldField string +} + +// --------- V2 struct definition below --------- + +type V2 struct { + Name string + NewField string // this was a renamed field +} + +func (to *V2) ConvertFrom(from interface{}) error { + if from, ok := from.(V1); ok { // forward migration + to.NewField = from.OldField + } + return nil +} + +// --------- V3 struct definition below --------- + +type V3 struct { + Name []string + FinalField []string // this field was renamed and the type was changed +} + +func (to *V3) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // forward migration + to.FinalField = []string{from.NewField} + } + return nil +} +``` + +Given these type definitions, we can easily set up a conversion chain +like this: + +```go +chain := converter.NewChain(V1{}, V2{}, V3{}) +``` + +This chain can then be used to convert from an _older version_ to a _newer +version_. This is because our `ConvertFrom` definitions are only handling +_forward_ migrations. + +This chain can be used to convert from a `V1` struct to a `V3` struct easily, +like this: + +```go +v1 := // somehow get a populated v1 struct +v3 := V3{} +chain.Convert(v1, &v3) +``` + +Since we've defined our chain as `V1` → `V2` → `V3`, the chain will execute +conversions to all intermediary structs (`V2`, in this case) and ultimately end +when we've populated the `v3` instance. + +Note we haven't needed to define any conversions on the `Name` field of any structs +since this one is convertible between structs: `string` → `string` → `[]string`. + +## Backwards Migrations + +If we wanted to _also_ provide backwards migrations, we could also easily add a case +to the `ConvertFrom` methods. The whole set of structs would look something like this: + + +```go +// --------- V1 struct definition below --------- + +type V1 struct { + Name string + OldField string +} + +func (to *V1) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // backward migration + to.OldField = from.NewField + } + return nil +} + +// --------- V2 struct definition below --------- + +type V2 struct { + Name string + NewField string +} + +func (to *V2) ConvertFrom(from interface{}) error { + if from, ok := from.(V1); ok { // forward migration + to.NewField = from.OldField + } + if from, ok := from.(V3); ok { // backward migration + to.NewField = from.FinalField[0] + } + return nil +} + +// --------- V3 struct definition below --------- + +type V3 struct { + Name []string + FinalField []string +} + +func (to *V3) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // forward migration + to.FinalField = []string{from.NewField} + } + return nil +} +``` + +At this point we could convert in either direction, for example a +`V3` struct could convert to a `V1` struct, with the caveat that there +may be data loss, as might need to happen due to changes in the data shapes. + +## Contributing + +If you would like to contribute to this repository, please see the +[CONTRIBUTING.md](CONTRIBUTING.md). 
diff --git a/vendor/github.com/anchore/go-struct-converter/chain.go b/vendor/github.com/anchore/go-struct-converter/chain.go new file mode 100644 index 0000000000..41aa0e1d7f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/chain.go @@ -0,0 +1,95 @@ +package converter + +import ( + "fmt" + "reflect" +) + +// NewChain takes a set of structs, in order, to allow for accurate chain.Convert(from, &to) calls. NewChain should +// be called with struct values in a manner similar to this: +// converter.NewChain(v1.Document{}, v2.Document{}, v3.Document{}) +func NewChain(structs ...interface{}) Chain { + out := Chain{} + for _, s := range structs { + typ := reflect.TypeOf(s) + if isPtr(typ) { // these shouldn't be pointers, but check just to be safe + typ = typ.Elem() + } + out.Types = append(out.Types, typ) + } + return out +} + +// Chain holds a set of types with which to migrate through when a `chain.Convert` call is made +type Chain struct { + Types []reflect.Type +} + +// Convert converts from one type in the chain to the target type, calling each conversion in between +func (c Chain) Convert(from interface{}, to interface{}) (err error) { + fromValue := reflect.ValueOf(from) + fromType := fromValue.Type() + + // handle incoming pointers + for isPtr(fromType) { + fromValue = fromValue.Elem() + fromType = fromType.Elem() + } + + toValuePtr := reflect.ValueOf(to) + toTypePtr := toValuePtr.Type() + + if !isPtr(toTypePtr) { + return fmt.Errorf("TO struct provided not a pointer, unable to set values: %v", to) + } + + // toValue must be a pointer but need a reference to the struct type directly + toValue := toValuePtr.Elem() + toType := toValue.Type() + + fromIdx := -1 + toIdx := -1 + + for i, typ := range c.Types { + if typ == fromType { + fromIdx = i + } + if typ == toType { + toIdx = i + } + } + + if fromIdx == -1 { + return fmt.Errorf("invalid FROM type provided, not in the conversion chain: %s", fromType.Name()) + } + + if toIdx == -1 { + return fmt.Errorf("invalid TO type provided, not in the conversion chain: %s", toType.Name()) + } + + last := from + for i := fromIdx; i != toIdx; { + // skip the first index, because that is the from type - start with the next conversion in the chain + if fromIdx < toIdx { + i++ + } else { + i-- + } + + var next interface{} + if i == toIdx { + next = to + } else { + nextVal := reflect.New(c.Types[i]) + next = nextVal.Interface() // this will be a pointer, which is fine to pass to both from and to in Convert + } + + if err = Convert(last, next); err != nil { + return err + } + + last = next + } + + return nil +} diff --git a/vendor/github.com/anchore/go-struct-converter/converter.go b/vendor/github.com/anchore/go-struct-converter/converter.go new file mode 100644 index 0000000000..57d1b332db --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/converter.go @@ -0,0 +1,334 @@ +package converter + +import ( + "fmt" + "reflect" + "strconv" +) + +// ConvertFrom interface allows structs to define custom conversion functions if the automated reflection-based Convert +// is not able to convert properties due to name changes or other factors. +type ConvertFrom interface { + ConvertFrom(interface{}) error +} + +// Convert takes two objects, e.g. v2_1.Document and &v2_2.Document{} and attempts to map all the properties from one +// to the other. After the automatic mapping, if a struct implements the ConvertFrom interface, this is called to +// perform any additional conversion logic necessary. 
+func Convert(from interface{}, to interface{}) error { + fromValue := reflect.ValueOf(from) + + toValuePtr := reflect.ValueOf(to) + toTypePtr := toValuePtr.Type() + + if !isPtr(toTypePtr) { + return fmt.Errorf("TO value provided was not a pointer, unable to set value: %v", to) + } + + toValue, err := getValue(fromValue, toTypePtr) + if err != nil { + return err + } + + // don't set nil values + if toValue == nilValue { + return nil + } + + // toValuePtr is the passed-in pointer, toValue is also the same type of pointer + toValuePtr.Elem().Set(toValue.Elem()) + return nil +} + +func getValue(fromValue reflect.Value, targetType reflect.Type) (reflect.Value, error) { + var err error + + fromType := fromValue.Type() + + var toValue reflect.Value + + // handle incoming pointer Types + if isPtr(fromType) { + if fromValue.IsNil() { + return nilValue, nil + } + fromValue = fromValue.Elem() + if !fromValue.IsValid() || fromValue.IsZero() { + return nilValue, nil + } + fromType = fromValue.Type() + } + + baseTargetType := targetType + if isPtr(targetType) { + baseTargetType = targetType.Elem() + } + + switch { + case isStruct(fromType) && isStruct(baseTargetType): + // this always creates a pointer type + toValue = reflect.New(baseTargetType) + toValue = toValue.Elem() + + for i := 0; i < fromType.NumField(); i++ { + fromField := fromType.Field(i) + fromFieldValue := fromValue.Field(i) + + toField, exists := baseTargetType.FieldByName(fromField.Name) + if !exists { + continue + } + toFieldType := toField.Type + + toFieldValue := toValue.FieldByName(toField.Name) + + newValue, err := getValue(fromFieldValue, toFieldType) + if err != nil { + return nilValue, err + } + + if newValue == nilValue { + continue + } + + toFieldValue.Set(newValue) + } + + // allow structs to implement a custom convert function from previous/next version struct + if reflect.PtrTo(baseTargetType).Implements(convertFromType) { + convertFrom := toValue.Addr().MethodByName(convertFromName) + if !convertFrom.IsValid() { + return nilValue, fmt.Errorf("unable to get ConvertFrom method") + } + args := []reflect.Value{fromValue} + out := convertFrom.Call(args) + err := out[0].Interface() + if err != nil { + return nilValue, fmt.Errorf("an error occurred calling %s.%s: %v", baseTargetType.Name(), convertFromName, err) + } + } + case isSlice(fromType) && isSlice(baseTargetType): + if fromValue.IsNil() { + return nilValue, nil + } + + length := fromValue.Len() + targetElementType := baseTargetType.Elem() + toValue = reflect.MakeSlice(baseTargetType, length, length) + for i := 0; i < length; i++ { + v, err := getValue(fromValue.Index(i), targetElementType) + if err != nil { + return nilValue, err + } + if v.IsValid() { + toValue.Index(i).Set(v) + } + } + case isMap(fromType) && isMap(baseTargetType): + if fromValue.IsNil() { + return nilValue, nil + } + + keyType := baseTargetType.Key() + elementType := baseTargetType.Elem() + toValue = reflect.MakeMap(baseTargetType) + for _, fromKey := range fromValue.MapKeys() { + fromVal := fromValue.MapIndex(fromKey) + k, err := getValue(fromKey, keyType) + if err != nil { + return nilValue, err + } + v, err := getValue(fromVal, elementType) + if err != nil { + return nilValue, err + } + if k == nilValue || v == nilValue { + continue + } + if v == nilValue { + continue + } + if k.IsValid() && v.IsValid() { + toValue.SetMapIndex(k, v) + } + } + default: + // TODO determine if there are other conversions + toValue = fromValue + } + + // handle non-pointer returns -- the reflect.New earlier always 
creates a pointer + if !isPtr(baseTargetType) { + toValue = fromPtr(toValue) + } + + toValue, err = convertValueTypes(toValue, baseTargetType) + + if err != nil { + return nilValue, err + } + + // handle elements which are now pointers + if isPtr(targetType) { + toValue = toPtr(toValue) + } + + return toValue, nil +} + +// convertValueTypes takes a value and a target type, and attempts to convert +// between the Types - e.g. string -> int. when this function is called the value +func convertValueTypes(value reflect.Value, targetType reflect.Type) (reflect.Value, error) { + typ := value.Type() + switch { + // if the Types are the same, just return the value + case typ.Kind() == targetType.Kind(): + return value, nil + case value.IsZero() && isPrimitive(targetType): + + case isPrimitive(typ) && isPrimitive(targetType): + // get a string representation of the value + str := fmt.Sprintf("%v", value.Interface()) // TODO is there a better way to get a string representation? + var err error + var out interface{} + switch { + case isString(targetType): + out = str + case isBool(targetType): + out, err = strconv.ParseBool(str) + case isInt(targetType): + out, err = strconv.Atoi(str) + case isUint(targetType): + out, err = strconv.ParseUint(str, 10, 64) + case isFloat(targetType): + out, err = strconv.ParseFloat(str, 64) + } + + if err != nil { + return nilValue, err + } + + v := reflect.ValueOf(out) + + v = v.Convert(targetType) + + return v, nil + case isSlice(typ) && isSlice(targetType): + // this should already be handled in getValue + case isSlice(typ): + // this may be lossy + if value.Len() > 0 { + v := value.Index(0) + v, err := convertValueTypes(v, targetType) + if err != nil { + return nilValue, err + } + return v, nil + } + return convertValueTypes(nilValue, targetType) + case isSlice(targetType): + elementType := targetType.Elem() + v, err := convertValueTypes(value, elementType) + if err != nil { + return nilValue, err + } + if v == nilValue { + return v, nil + } + slice := reflect.MakeSlice(targetType, 1, 1) + slice.Index(0).Set(v) + return slice, nil + } + + return nilValue, fmt.Errorf("unable to convert from: %v to %v", value.Interface(), targetType.Name()) +} + +func isPtr(typ reflect.Type) bool { + return typ.Kind() == reflect.Ptr +} + +func isPrimitive(typ reflect.Type) bool { + return isString(typ) || isBool(typ) || isInt(typ) || isUint(typ) || isFloat(typ) +} + +func isString(typ reflect.Type) bool { + return typ.Kind() == reflect.String +} + +func isBool(typ reflect.Type) bool { + return typ.Kind() == reflect.Bool +} + +func isInt(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + return true + } + return false +} + +func isUint(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + return true + } + return false +} + +func isFloat(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Float32, + reflect.Float64: + return true + } + return false +} + +func isStruct(typ reflect.Type) bool { + return typ.Kind() == reflect.Struct +} + +func isSlice(typ reflect.Type) bool { + return typ.Kind() == reflect.Slice +} + +func isMap(typ reflect.Type) bool { + return typ.Kind() == reflect.Map +} + +func toPtr(val reflect.Value) reflect.Value { + typ := val.Type() + if !isPtr(typ) { + // this creates a pointer type inherently + ptrVal := reflect.New(typ) + ptrVal.Elem().Set(val) + val = ptrVal + } + return val +} + 
+func fromPtr(val reflect.Value) reflect.Value { + if isPtr(val.Type()) { + val = val.Elem() + } + return val +} + +// convertFromName constant to find the ConvertFrom method +const convertFromName = "ConvertFrom" + +var ( + // nilValue is returned in a number of cases when a value should not be set + nilValue = reflect.ValueOf(nil) + + // convertFromType is the type to check for ConvertFrom implementations + convertFromType = reflect.TypeOf((*ConvertFrom)(nil)).Elem() +) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go index b7b9f2a2b7..d590b48b6e 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go @@ -17,7 +17,6 @@ const ( LayerAnnotationNydusBlob = "containerd.io/snapshot/nydus-blob" LayerAnnotationNydusBlobDigest = "containerd.io/snapshot/nydus-blob-digest" LayerAnnotationNydusBlobSize = "containerd.io/snapshot/nydus-blob-size" - LayerAnnotationNydusBlobIDs = "containerd.io/snapshot/nydus-blob-ids" LayerAnnotationNydusBootstrap = "containerd.io/snapshot/nydus-bootstrap" LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid" diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go index dc0130aefe..fd9c49cda1 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -11,9 +11,10 @@ package converter import ( "archive/tar" + "bytes" "compress/gzip" "context" - "encoding/json" + "encoding/binary" "fmt" "io" "os" @@ -24,10 +25,12 @@ import ( "github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/labels" "github.com/containerd/fifo" + "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -35,11 +38,14 @@ import ( "golang.org/x/sync/errgroup" "github.com/containerd/nydus-snapshotter/pkg/converter/tool" - "github.com/containerd/nydus-snapshotter/pkg/errdefs" + "github.com/containerd/nydus-snapshotter/pkg/label" ) -const bootstrapNameInTar = "image.boot" -const blobNameInTar = "image.blob" +const EntryBlob = "image.blob" +const EntryBootstrap = "image.boot" +const EntryBlobMeta = "blob.meta" +const EntryBlobMetaHeader = "blob.meta.header" +const EntryTOC = "rafs.blob.toc" const envNydusBuilder = "NYDUS_BUILDER" const envNydusWorkDir = "NYDUS_WORKDIR" @@ -113,152 +119,190 @@ func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { return nil } -// Unpack a Nydus formatted tar stream into a directory. -func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error { +// unpackNydusBlob unpacks a Nydus formatted tar stream into a directory. +// unpackBlob indicates whether to unpack blob data. 
+func unpackNydusBlob(bootDst, blobDst string, ra content.ReaderAt, unpackBlob bool) error { boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return errors.Wrapf(err, "write to bootstrap %s", bootDst) } defer boot.Close() - if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil { + if _, err = UnpackEntry(ra, EntryBootstrap, boot); err != nil { return errors.Wrap(err, "unpack bootstrap from nydus") } - blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return errors.Wrapf(err, "write to blob %s", blobDst) - } - defer blob.Close() + if unpackBlob { + blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to blob %s", blobDst) + } + defer blob.Close() - if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil { - return errors.Wrap(err, "unpack blob from nydus") + if _, err = UnpackEntry(ra, EntryBlob, blob); err != nil { + if errors.Is(err, ErrNotFound) { + // The nydus layer may contain only bootstrap and no blob + // data, which should be ignored. + return nil + } + return errors.Wrap(err, "unpack blob from nydus") + } } return nil } -// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). -// The nydus formatted tar stream is a tar-like structure that arranges the -// data as follows: -// -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` -func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { - cur := ra.Size() - reader := newSeekReader(ra) - +func seekFileByTarHeader(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) error { const headerSize = 512 - // Seek from tail to head of nydus formatted tar stream to find nydus - // bootstrap data. - for { - if headerSize > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - - // Try to seek to the part of tar header. - var err error - cur, err = reader.Seek(cur-headerSize, io.SeekCurrent) - if err != nil { - return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) - } - - tr := tar.NewReader(reader) - // Parse tar header. - hdr, err := tr.Next() - if err != nil { - return errors.Wrap(err, "parse tar header") - } - - if hdr.Name == bootstrapNameInTar { - // Try to seek to the part of tar data (bootstrap_data). - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - bootstrapOffset := cur - hdr.Size - _, err = reader.Seek(bootstrapOffset, io.SeekStart) - if err != nil { - return errors.Wrap(err, "seek to bootstrap data offset") - } - - // Copy tar data (bootstrap_data) to provided target writer. - if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy bootstrap data to reader") - } - - return nil - } - - if cur == hdr.Size { - break - } + if headerSize > ra.Size() { + return fmt.Errorf("invalid nydus tar size %d", ra.Size()) } - return fmt.Errorf("can't find bootstrap in nydus tar") -} - -// Unpack the blob from nydus formatted tar stream (blob + bootstrap). 
-// The nydus formatted tar stream is a tar-like structure that arranges the -// data as follows: -// -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` -func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { - cur := ra.Size() + cur := ra.Size() - headerSize reader := newSeekReader(ra) - const headerSize = 512 - - // Seek from tail to head of nydus formatted tar stream to find nydus - // bootstrap data. + // Seek from tail to head of nydus formatted tar stream to find + // target data. for { - if headerSize > cur { - break - } - - // Try to seek to the part of tar header. - var err error - cur, err = reader.Seek(cur-headerSize, io.SeekStart) + // Try to seek the part of tar header. + _, err := reader.Seek(cur, io.SeekStart) if err != nil { - return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + return errors.Wrapf(err, "seek %d for nydus tar header", cur) } - tr := tar.NewReader(reader) // Parse tar header. + tr := tar.NewReader(reader) hdr, err := tr.Next() if err != nil { - return errors.Wrap(err, "parse tar header") + return errors.Wrap(err, "parse nydus tar header") } - if hdr.Name == bootstrapNameInTar { - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - cur, err = reader.Seek(cur-hdr.Size, io.SeekStart) - if err != nil { - return errors.Wrap(err, "seek to bootstrap data offset") - } - } else if hdr.Name == blobNameInTar { - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } + if cur < hdr.Size { + return fmt.Errorf("invalid nydus tar data, name %s, size %d", hdr.Name, hdr.Size) + } + + if hdr.Name == targetName { + // Try to seek the part of tar data. _, err = reader.Seek(cur-hdr.Size, io.SeekStart) if err != nil { - return errors.Wrap(err, "seek to blob data offset") + return errors.Wrap(err, "seek target data offset") } - if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy blob data to reader") + dataReader := io.NewSectionReader(reader, cur-hdr.Size, hdr.Size) + + if err := handle(dataReader, hdr); err != nil { + return errors.Wrap(err, "handle target data") } + return nil } + + cur = cur - hdr.Size - headerSize + if cur < 0 { + break + } } - return nil + return errors.Wrapf(ErrNotFound, "can't find target %s by seeking tar", targetName) +} + +func seekFileByTOC(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) { + entrySize := 128 + var tocEntry *TOCEntry + + err := seekFileByTarHeader(ra, EntryTOC, func(tocEntryDataReader io.Reader, _ *tar.Header) error { + entryData, err := io.ReadAll(tocEntryDataReader) + if err != nil { + return errors.Wrap(err, "read toc entries") + } + if len(entryData)%entrySize != 0 { + return fmt.Errorf("invalid entries length %d", len(entryData)) + } + + count := len(entryData) / entrySize + for i := 0; i < count; i++ { + var entry TOCEntry + r := bytes.NewReader(entryData[i*entrySize : i*entrySize+entrySize]) + if err := binary.Read(r, binary.LittleEndian, &entry); err != nil { + return errors.Wrap(err, "read toc entries") + } + if entry.GetName() == targetName { + compressor, err := entry.GetCompressor() + if err != nil { + return errors.Wrap(err, "get compressor of entry") + } + compressedOffset := int64(entry.GetCompressedOffset()) + compressedSize := int64(entry.GetCompressedSize()) + sr := io.NewSectionReader(ra, compressedOffset, compressedSize) + + var rd io.Reader + switch compressor { + case CompressorZstd: 
+ decoder, err := zstd.NewReader(sr) + if err != nil { + return errors.Wrap(err, "seek to target data offset") + } + defer decoder.Close() + rd = decoder + case CompressorNone: + rd = sr + default: + return fmt.Errorf("unsupported compressor %x", compressor) + } + + if err := handle(rd, nil); err != nil { + return errors.Wrap(err, "handle target entry data") + } + + tocEntry = &entry + + return nil + } + } + + return errors.Wrapf(ErrNotFound, "can't find target %s by seeking TOC", targetName) + }) + + return tocEntry, err +} + +// Unpack the file from nydus formatted tar stream. +// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `data | tar_header | ... | data | tar_header | [toc_entry | ... | toc_entry | tar_header]` +func UnpackEntry(ra content.ReaderAt, targetName string, target io.Writer) (*TOCEntry, error) { + handle := func(dataReader io.Reader, _ *tar.Header) error { + // Copy data to provided target writer. + if _, err := io.Copy(target, dataReader); err != nil { + return errors.Wrap(err, "copy target data to reader") + } + + return nil + } + + return seekFile(ra, targetName, handle) +} + +func seekFile(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) { + // Try seek target data by TOC. + entry, err := seekFileByTOC(ra, targetName, handle) + if err != nil { + if !errors.Is(err, ErrNotFound) { + return nil, errors.Wrap(err, "seek file by TOC") + } + } else { + return entry, nil + } + + // Seek target data by tar header, ensure compatible with old rafs blob format. + return nil, seekFileByTarHeader(ra, targetName, handle) } // Pack converts an OCI tar stream to nydus formatted stream with a tar-like // structure that arranges the data as follows: // -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +// `data | tar_header | data | tar_header | [toc_entry | ... | toc_entry | tar_header]` // // The caller should write OCI tar stream into the returned `io.WriteCloser`, // then the Pack method will write the nydus formatted stream to `dest` @@ -267,6 +311,24 @@ func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io. // Important: the caller must check `io.WriteCloser.Close() == nil` to ensure // the conversion workflow is finished. 
func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + if opt.FsVersion == "" { + opt.FsVersion = "6" + } + + builderPath := getBuilder(opt.BuilderPath) + opt.features = tool.DetectFeatures(builderPath, []tool.Feature{tool.FeatureTar2Rafs}) + + if opt.OCIRef { + if opt.FsVersion == "6" { + return packFromTar(ctx, dest, opt) + } + return nil, fmt.Errorf("oci ref can only be supported by fs version 6") + } + + if opt.features.Contains(tool.FeatureTar2Rafs) { + return packFromTar(ctx, dest, opt) + } + workDir, err := ensureWorkDir(opt.WorkDir) if err != nil { return nil, errors.Wrap(err, "ensure work directory") @@ -295,9 +357,7 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, }() wc := newWriteCloser(pw, func() error { - defer func() { - os.RemoveAll(workDir) - }() + defer os.RemoveAll(workDir) // Because PipeWriter#Close is called does not mean that the PipeReader // has finished reading all the data, and unpack may not be complete yet, @@ -313,15 +373,19 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, go func() { err := tool.Pack(tool.PackOption{ - BuilderPath: getBuilder(opt.BuilderPath), + BuilderPath: builderPath, BlobPath: blobPath, FsVersion: opt.FsVersion, SourcePath: sourceDir, ChunkDictPath: opt.ChunkDictPath, PrefetchPatterns: opt.PrefetchPatterns, + AlignedChunk: opt.AlignedChunk, + ChunkSize: opt.ChunkSize, Compressor: opt.Compressor, Timeout: opt.Timeout, + + Features: opt.features, }) if err != nil { pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir)) @@ -341,6 +405,117 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, return wc, nil } +func packFromTar(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + workDir, err := ensureWorkDir(opt.WorkDir) + if err != nil { + return nil, errors.Wrap(err, "ensure work directory") + } + defer func() { + if err != nil { + os.RemoveAll(workDir) + } + }() + + rafsBlobPath := filepath.Join(workDir, "blob.rafs") + rafsBlobFifo, err := fifo.OpenFifo(ctx, rafsBlobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644) + if err != nil { + return nil, errors.Wrapf(err, "create fifo file") + } + + tarBlobPath := filepath.Join(workDir, "blob.targz") + tarBlobFifo, err := fifo.OpenFifo(ctx, tarBlobPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_NONBLOCK, 0644) + if err != nil { + defer rafsBlobFifo.Close() + return nil, errors.Wrapf(err, "create fifo file") + } + + pr, pw := io.Pipe() + eg := errgroup.Group{} + + wc := newWriteCloser(pw, func() error { + defer os.RemoveAll(workDir) + if err := eg.Wait(); err != nil { + return errors.Wrapf(err, "convert nydus ref") + } + return nil + }) + + eg.Go(func() error { + defer tarBlobFifo.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(tarBlobFifo, pr, *buffer); err != nil { + return errors.Wrapf(err, "copy targz to fifo") + } + return nil + }) + + eg.Go(func() error { + defer rafsBlobFifo.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(dest, rafsBlobFifo, *buffer); err != nil { + return errors.Wrapf(err, "copy blob meta fifo to nydus blob") + } + return nil + }) + + eg.Go(func() error { + var err error + if opt.OCIRef { + err = tool.Pack(tool.PackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + + OCIRef: opt.OCIRef, + BlobPath: rafsBlobPath, + SourcePath: tarBlobPath, + Timeout: opt.Timeout, + + Features: 
opt.features, + }) + } else { + err = tool.Pack(tool.PackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + + BlobPath: rafsBlobPath, + FsVersion: opt.FsVersion, + SourcePath: tarBlobPath, + ChunkDictPath: opt.ChunkDictPath, + PrefetchPatterns: opt.PrefetchPatterns, + AlignedChunk: opt.AlignedChunk, + ChunkSize: opt.ChunkSize, + BatchSize: opt.BatchSize, + Compressor: opt.Compressor, + Timeout: opt.Timeout, + + Features: opt.features, + }) + } + if err != nil { + // Without handling the returned error because we just only + // focus on the command exit status in `tool.Pack`. + wc.Close() + } + return errors.Wrapf(err, "call builder") + }) + + return wc, nil +} + +func calcBlobTOCDigest(ra content.ReaderAt) (*digest.Digest, error) { + digester := digest.Canonical.Digester() + if err := seekFileByTarHeader(ra, EntryTOC, func(tocData io.Reader, _ *tar.Header) error { + if _, err := io.Copy(digester.Hash(), tocData); err != nil { + return errors.Wrap(err, "calc toc data and header digest") + } + return nil + }); err != nil { + return nil, err + } + tocDigest := digester.Digest() + return &tocDigest, nil +} + // Merge multiple nydus bootstraps (from each layer of image) to a final // bootstrap. And due to the possibility of enabling the `ChunkDictPath` // option causes the data deduplication, it will return the actual blob @@ -352,22 +527,40 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) } defer os.RemoveAll(workDir) - eg, ctx := errgroup.WithContext(ctx) + getBootstrapPath := func(layerIdx int) string { + digestHex := layers[layerIdx].Digest.Hex() + if originalDigest := layers[layerIdx].OriginalDigest; originalDigest != nil { + return filepath.Join(workDir, originalDigest.Hex()) + } + return filepath.Join(workDir, digestHex) + } + + eg, _ := errgroup.WithContext(ctx) sourceBootstrapPaths := []string{} + rafsBlobDigests := []string{} + rafsBlobSizes := []int64{} + rafsBlobTOCDigests := []string{} for idx := range layers { - sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex())) + sourceBootstrapPaths = append(sourceBootstrapPaths, getBootstrapPath(idx)) + if layers[idx].OriginalDigest != nil { + rafsBlobDigests = append(rafsBlobDigests, layers[idx].Digest.Hex()) + rafsBlobSizes = append(rafsBlobSizes, layers[idx].ReaderAt.Size()) + rafsBlobTOCDigest, err := calcBlobTOCDigest(layers[idx].ReaderAt) + if err != nil { + return nil, errors.Wrapf(err, "calc blob toc digest for layer %s", layers[idx].Digest) + } + rafsBlobTOCDigests = append(rafsBlobTOCDigests, rafsBlobTOCDigest.Hex()) + } eg.Go(func(idx int) func() error { return func() error { - layer := layers[idx] - // Use the hex hash string of whole tar blob as the bootstrap name. 
- bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex())) + bootstrap, err := os.Create(getBootstrapPath(idx)) if err != nil { return errors.Wrap(err, "create source bootstrap") } defer bootstrap.Close() - if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil { + if _, err := UnpackEntry(layers[idx].ReaderAt, EntryBootstrap, bootstrap); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -386,11 +579,16 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) BuilderPath: getBuilder(opt.BuilderPath), SourceBootstrapPaths: sourceBootstrapPaths, - TargetBootstrapPath: targetBootstrapPath, - ChunkDictPath: opt.ChunkDictPath, - PrefetchPatterns: opt.PrefetchPatterns, - OutputJSONPath: filepath.Join(workDir, "merge-output.json"), - Timeout: opt.Timeout, + RafsBlobDigests: rafsBlobDigests, + RafsBlobSizes: rafsBlobSizes, + RafsBlobTOCDigests: rafsBlobTOCDigests, + + TargetBootstrapPath: targetBootstrapPath, + ChunkDictPath: opt.ChunkDictPath, + ParentBootstrapPath: opt.ParentBootstrapPath, + PrefetchPatterns: opt.PrefetchPatterns, + OutputJSONPath: filepath.Join(workDir, "merge-output.json"), + Timeout: opt.Timeout, }) if err != nil { return nil, errors.Wrap(err, "merge bootstrap") @@ -399,7 +597,7 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) var rc io.ReadCloser if opt.WithTar { - rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false) + rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", EntryBootstrap), false) if err != nil { return nil, errors.Wrap(err, "pack bootstrap to tar") } @@ -428,8 +626,8 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack } defer os.RemoveAll(workDir) - bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar) - if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil { + bootPath, blobPath := filepath.Join(workDir, EntryBootstrap), filepath.Join(workDir, EntryBlob) + if err = unpackNydusBlob(bootPath, blobPath, ra, !opt.Stream); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -440,16 +638,35 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack } defer blobFifo.Close() + unpackOpt := tool.UnpackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + BootstrapPath: bootPath, + BlobPath: blobPath, + TarPath: tarPath, + Timeout: opt.Timeout, + } + + if opt.Stream { + proxy, err := setupContentStoreProxy(opt.WorkDir, ra) + if err != nil { + return errors.Wrap(err, "new content store proxy") + } + defer proxy.close() + + // generate backend config file + backendConfigStr := fmt.Sprintf(`{"version":2,"backend":{"type":"http-proxy","http-proxy":{"addr":"%s"}}}`, proxy.socketPath) + backendConfigPath := filepath.Join(workDir, "backend-config.json") + if err := os.WriteFile(backendConfigPath, []byte(backendConfigStr), 0644); err != nil { + return errors.Wrap(err, "write backend config") + } + unpackOpt.BlobPath = "" + unpackOpt.BackendConfigPath = backendConfigPath + } + unpackErrChan := make(chan error) go func() { defer close(unpackErrChan) - err := tool.Unpack(tool.UnpackOption{ - BuilderPath: getBuilder(opt.BuilderPath), - BootstrapPath: bootPath, - BlobPath: blobPath, - TarPath: tarPath, - Timeout: opt.Timeout, - }) + err := tool.Unpack(unpackOpt) if err != nil { blobFifo.Close() unpackErrChan <- err @@ -476,11 +693,11 @@ func IsNydusBlobAndExists(ctx context.Context, cs 
content.Store, desc ocispec.De return false } - return IsNydusBlob(ctx, desc) + return IsNydusBlob(desc) } -// IsNydusBlob returns true when the specified descriptor is nydus blob format. -func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { +// IsNydusBlob returns true when the specified descriptor is nydus blob layer. +func IsNydusBlob(desc ocispec.Descriptor) bool { if desc.Annotations == nil { return false } @@ -489,6 +706,16 @@ func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { return hasAnno } +// IsNydusBootstrap returns true when the specified descriptor is nydus bootstrap layer. +func IsNydusBootstrap(desc ocispec.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + _, hasAnno := desc.Annotations[LayerAnnotationNydusBootstrap] + return hasAnno +} + // LayerConvertFunc returns a function which converts an OCI image layer to // a nydus blob layer, and set the media type to "application/vnd.oci.image.layer.nydus.blob.v1". func LayerConvertFunc(opt PackOption) converter.ConvertFunc { @@ -497,6 +724,11 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { return nil, nil } + // Skip the conversion of nydus layer. + if IsNydusBlob(desc) || IsNydusBootstrap(desc) { + return nil, nil + } + ra, err := cs.ReaderAt(ctx, desc) if err != nil { return nil, errors.Wrap(err, "get source blob reader") @@ -511,9 +743,14 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { } defer dst.Close() - tr, err := compression.DecompressStream(rdr) - if err != nil { - return nil, errors.Wrap(err, "decompress blob stream") + var tr io.ReadCloser + if opt.OCIRef { + tr = io.NopCloser(rdr) + } else { + tr, err = compression.DecompressStream(rdr) + if err != nil { + return nil, errors.Wrap(err, "decompress blob stream") + } } digester := digest.SHA256.Digester() @@ -574,14 +811,12 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { }, } - if opt.Backend != nil { - blobRa, err := cs.ReaderAt(ctx, newDesc) - if err != nil { - return nil, errors.Wrap(err, "get nydus blob reader") - } - defer blobRa.Close() + if opt.OCIRef { + newDesc.Annotations[label.NydusRefLayer] = desc.Digest.String() + } - if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil { + if opt.Backend != nil { + if err := opt.Backend.Push(ctx, cs, newDesc); err != nil { return nil, errors.Wrap(err, "push to storage backend") } } @@ -595,11 +830,15 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { // the index conversion and the manifest conversion. func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + // If the previous conversion did not occur, the `newDesc` may be nil. + if newDesc == nil { + return &orgDesc, nil + } switch { case images.IsIndexType(newDesc.MediaType): return convertIndex(ctx, cs, orgDesc, newDesc) case images.IsManifestType(newDesc.MediaType): - return convertManifest(ctx, cs, newDesc, opt) + return convertManifest(ctx, cs, orgDesc, newDesc, opt) default: return newDesc, nil } @@ -636,6 +875,13 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus) index.Manifests[i] = manifest } + + // If the converted manifest list contains only one manifest, + // convert it directly to manifest. 
+ if len(index.Manifests) == 1 { + return &index.Manifests[0], nil + } + // Update image index in content store. newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels) if err != nil { @@ -644,10 +890,23 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip return newIndexDesc, nil } +// isNydusImage checks if the last layer is nydus bootstrap, +// so that we can ensure it is a nydus image. +func isNydusImage(manifest *ocispec.Manifest) bool { + layers := manifest.Layers + if len(layers) != 0 { + desc := layers[len(layers)-1] + if IsNydusBootstrap(desc) { + return true + } + } + return false +} + // convertManifest merges all the nydus blob layers into a // nydus bootstrap layer, update the image config, // and modify the image manifest. -func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { +func convertManifest(ctx context.Context, cs content.Store, oldDesc ocispec.Descriptor, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { var manifest ocispec.Manifest manifestDesc := *newDesc manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc) @@ -655,14 +914,21 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des return nil, errors.Wrap(err, "read manifest json") } + if isNydusImage(&manifest) { + return &manifestDesc, nil + } + + // This option needs to be enabled for image scenario. + opt.WithTar = true + + // If the original image is already an OCI type, we should forcibly set the + // bootstrap layer to the OCI type. + if !opt.OCI && oldDesc.MediaType == ocispec.MediaTypeImageManifest { + opt.OCI = true + } + // Append bootstrap layer to manifest. - bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{ - BuilderPath: opt.BuilderPath, - WorkDir: opt.WorkDir, - ChunkDictPath: opt.ChunkDictPath, - FsVersion: opt.FsVersion, - WithTar: true, - }) + bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, opt) if err != nil { return nil, errors.Wrap(err, "merge nydus layers") } @@ -678,7 +944,8 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Affected by chunk dict, the blob list referenced by final bootstrap // are from different layers, part of them are from original layers, part // from chunk dict bootstrap, so we need to rewrite manifest's layers here. - manifest.Layers = append(blobDescs, *bootstrapDesc) + blobDescs := append(blobDescs, *bootstrapDesc) + manifest.Layers = blobDescs } // Update the gc label of bootstrap layer @@ -691,8 +958,13 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des if err != nil { return nil, errors.Wrap(err, "read image config") } + bootstrapHistory := ocispec.History{ + CreatedBy: "Nydus Converter", + Comment: "Nydus Bootstrap Layer", + } if opt.Backend != nil { config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])} + config.History = []ocispec.History{bootstrapHistory} } else { config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers)) for i, layer := range manifest.Layers { @@ -700,6 +972,9 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Remove useless annotation. delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed) } + // Append history item for bootstrap layer, to ensure the history consistency. 
+ // See https://github.com/distribution/distribution/blob/e5d5810851d1f17a5070e9b6f940d8af98ea3c29/manifest/schema1/config_builder.go#L136 + config.History = append(config.History, bootstrapHistory) } // Update image config in content store. newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels) @@ -710,6 +985,11 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Update the config gc label manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String() + // Associate a reference to the original OCI manifest. + // See the `subject` field description in + // https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions + manifest.Subject = &oldDesc + // Update image manifest in content store. newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels) if err != nil { @@ -726,33 +1006,45 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript layers := []Layer{} var chainID digest.Digest - for _, blobDesc := range descs { - ra, err := cs.ReaderAt(ctx, blobDesc) + nydusBlobDigests := []digest.Digest{} + for _, nydusBlobDesc := range descs { + ra, err := cs.ReaderAt(ctx, nydusBlobDesc) if err != nil { - return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + return nil, nil, errors.Wrapf(err, "get reader for blob %q", nydusBlobDesc.Digest) } defer ra.Close() + var originalDigest *digest.Digest + if opt.OCIRef { + digestStr := nydusBlobDesc.Annotations[label.NydusRefLayer] + _originalDigest, err := digest.Parse(digestStr) + if err != nil { + return nil, nil, errors.Wrapf(err, "invalid label %s=%s", label.NydusRefLayer, digestStr) + } + originalDigest = &_originalDigest + } layers = append(layers, Layer{ - Digest: blobDesc.Digest, - ReaderAt: ra, + Digest: nydusBlobDesc.Digest, + OriginalDigest: originalDigest, + ReaderAt: ra, }) if chainID == "" { - chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{nydusBlobDesc.Digest}) } else { - chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{chainID, nydusBlobDesc.Digest}) } + nydusBlobDigests = append(nydusBlobDigests, nydusBlobDesc.Digest) } // Merge all nydus bootstraps into a final nydus bootstrap. pr, pw := io.Pipe() - blobDigestChan := make(chan []digest.Digest, 1) + originalBlobDigestChan := make(chan []digest.Digest, 1) go func() { defer pw.Close() - blobDigests, err := Merge(ctx, layers, pw, opt) + originalBlobDigests, err := Merge(ctx, layers, pw, opt) if err != nil { pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) } - blobDigestChan <- blobDigests + originalBlobDigestChan <- originalBlobDigests }() // Compress final nydus bootstrap to tar.gz and write into content store. 
@@ -791,10 +1083,17 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript return nil, nil, errors.Wrap(err, "get info from content store") } - blobDigests := <-blobDigestChan + originalBlobDigests := <-originalBlobDigestChan blobDescs := []ocispec.Descriptor{} - blobIDs := []string{} - for _, blobDigest := range blobDigests { + + var blobDigests []digest.Digest + if opt.OCIRef { + blobDigests = nydusBlobDigests + } else { + blobDigests = originalBlobDigests + } + + for idx, blobDigest := range blobDigests { blobInfo, err := cs.Info(ctx, blobDigest) if err != nil { return nil, nil, errors.Wrap(err, "get info from content store") @@ -808,30 +1107,29 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript LayerAnnotationNydusBlob: "true", }, } + if opt.OCIRef { + blobDesc.Annotations[label.NydusRefLayer] = layers[idx].OriginalDigest.String() + } blobDescs = append(blobDescs, blobDesc) - blobIDs = append(blobIDs, blobDigest.Hex()) - } - - blobIDsBytes, err := json.Marshal(blobIDs) - if err != nil { - return nil, nil, errors.Wrap(err, "marshal blob ids") } if opt.FsVersion == "" { - opt.FsVersion = "5" + opt.FsVersion = "6" + } + mediaType := images.MediaTypeDockerSchema2LayerGzip + if opt.OCI { + mediaType = ocispec.MediaTypeImageLayerGzip } bootstrapDesc := ocispec.Descriptor{ Digest: compressedDgst, Size: bootstrapInfo.Size, - MediaType: ocispec.MediaTypeImageLayerGzip, + MediaType: mediaType, Annotations: map[string]string{ LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), LayerAnnotationFSVersion: opt.FsVersion, // Use this annotation to identify nydus bootstrap layer. LayerAnnotationNydusBootstrap: "true", - // Track all blob digests for nydus snapshotter. - LayerAnnotationNydusBlobIDs: string(blobIDsBytes), }, } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go new file mode 100644 index 0000000000..43c8e02287 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go @@ -0,0 +1,168 @@ +//go:build !windows +// +build !windows + +/* + * Copyright (c) 2023. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "context" + "fmt" + "io" + "net" + "net/http" + "os" + "strconv" + "strings" + + "github.com/containerd/containerd/content" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type contentStoreProxy struct { + socketPath string + server *http.Server +} + +func setupContentStoreProxy(workDir string, ra content.ReaderAt) (*contentStoreProxy, error) { + sockP, err := os.CreateTemp(workDir, "nydus-cs-proxy-*.sock") + if err != nil { + return nil, errors.Wrap(err, "create unix socket file") + } + if err := os.Remove(sockP.Name()); err != nil { + return nil, err + } + listener, err := net.Listen("unix", sockP.Name()) + if err != nil { + return nil, errors.Wrap(err, "listen unix socket when setup content store proxy") + } + + server := &http.Server{ + Handler: contentProxyHandler(ra), + } + + go func() { + if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { + logrus.WithError(err).Warn("serve content store proxy") + } + }() + + return &contentStoreProxy{ + socketPath: sockP.Name(), + server: server, + }, nil +} + +func (p *contentStoreProxy) close() error { + defer os.Remove(p.socketPath) + if err := p.server.Shutdown(context.Background()); err != nil { + return errors.Wrap(err, "shutdown content store proxy") + } + return nil +} + +func parseRangeHeader(rangeStr string, totalLen int64) (start, wantedLen int64, err error) { + rangeList := strings.Split(rangeStr, "-") + start, err = strconv.ParseInt(rangeList[0], 10, 64) + if err != nil { + err = errors.Wrap(err, "parse range header") + return + } + if len(rangeList) == 2 { + var end int64 + end, err = strconv.ParseInt(rangeList[1], 10, 64) + if err != nil { + err = errors.Wrap(err, "parse range header") + return + } + wantedLen = end - start + 1 + } else { + wantedLen = totalLen - start + } + if start < 0 || start >= totalLen || wantedLen <= 0 { + err = fmt.Errorf("invalid range header: %s", rangeStr) + return + } + return +} + +func contentProxyHandler(ra content.ReaderAt) http.Handler { + var ( + dataReader io.Reader + curPos int64 + + tarHeader *tar.Header + totalLen int64 + ) + resetReader := func() { + // TODO: Handle error? + _, _ = seekFile(ra, EntryBlob, func(reader io.Reader, hdr *tar.Header) error { + dataReader, tarHeader = reader, hdr + return nil + }) + curPos = 0 + } + + resetReader() + if tarHeader != nil { + totalLen = tarHeader.Size + } else { + totalLen = ra.Size() + } + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodHead: + { + w.Header().Set("Content-Length", strconv.FormatInt(totalLen, 10)) + w.Header().Set("Content-Type", "application/octet-stream") + return + } + case http.MethodGet: + { + start, wantedLen, err := parseRangeHeader(strings.TrimPrefix(r.Header.Get("Range"), "bytes="), totalLen) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + // TODO: Handle error? + _, _ = w.Write([]byte(err.Error())) + return + } + + // we need to make sure that the dataReader is at the right position + if start < curPos { + resetReader() + } + if start > curPos { + _, err = io.CopyN(io.Discard, dataReader, start-curPos) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + // TODO: Handle error? 
+ _, _ = w.Write([]byte(err.Error())) + return + } + curPos = start + } + // then, the curPos must be equal to start + + readLen, err := io.CopyN(w, dataReader, wantedLen) + if err != nil && !errors.Is(err, io.EOF) { + w.WriteHeader(http.StatusInternalServerError) + // TODO: Handle error? + _, _ = w.Write([]byte(err.Error())) + return + } + curPos += readLen + w.Header().Set("Content-Length", strconv.FormatInt(readLen, 10)) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, start+readLen-1, totalLen)) + w.Header().Set("Content-Type", "application/octet-stream") + return + } + } + } + return http.HandlerFunc(handler) +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go index 55e98cc097..5c8284e95a 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -10,12 +10,11 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "os" "os/exec" "strings" "time" - "github.com/containerd/nydus-snapshotter/pkg/errdefs" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -23,6 +22,10 @@ import ( var logger = logrus.WithField("module", "builder") +func isSignalKilled(err error) bool { + return strings.Contains(err.Error(), "signal: killed") +} + type PackOption struct { BuilderPath string @@ -33,35 +36,47 @@ type PackOption struct { ChunkDictPath string PrefetchPatterns string Compressor string + OCIRef bool + AlignedChunk bool + ChunkSize string + BatchSize string Timeout *time.Duration + + Features Features } type MergeOption struct { BuilderPath string SourceBootstrapPaths []string - TargetBootstrapPath string - ChunkDictPath string - PrefetchPatterns string - OutputJSONPath string - Timeout *time.Duration + RafsBlobDigests []string + RafsBlobTOCDigests []string + RafsBlobSizes []int64 + + TargetBootstrapPath string + ChunkDictPath string + ParentBootstrapPath string + PrefetchPatterns string + OutputJSONPath string + Timeout *time.Duration } type UnpackOption struct { - BuilderPath string - BootstrapPath string - BlobPath string - TarPath string - Timeout *time.Duration + BuilderPath string + BootstrapPath string + BlobPath string + BackendConfigPath string + TarPath string + Timeout *time.Duration } type outputJSON struct { Blobs []string } -func Pack(option PackOption) error { +func buildPackArgs(option PackOption) []string { if option.FsVersion == "" { - option.FsVersion = "5" + option.FsVersion = "6" } args := []string{ @@ -72,14 +87,37 @@ func Pack(option PackOption) error { "fs", "--blob", option.BlobPath, - "--source-type", - "directory", "--whiteout-spec", "none", "--fs-version", option.FsVersion, - "--inline-bootstrap", } + + if option.Features.Contains(FeatureTar2Rafs) { + args = append( + args, + "--type", + "tar-rafs", + "--blob-inline-meta", + ) + if option.FsVersion == "6" { + args = append( + args, + "--features", + "blob-toc", + ) + } + } else { + args = append( + args, + "--source-type", + "directory", + // Sames with `--blob-inline-meta`, it's used for compatibility + // with the old nydus-image builder. 
+ "--inline-bootstrap", + ) + } + if option.ChunkDictPath != "" { args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) } @@ -89,6 +127,65 @@ func Pack(option PackOption) error { if option.Compressor != "" { args = append(args, "--compressor", option.Compressor) } + if option.AlignedChunk { + args = append(args, "--aligned-chunk") + } + if option.ChunkSize != "" { + args = append(args, "--chunk-size", option.ChunkSize) + } + if option.BatchSize != "" { + args = append(args, "--batch-size", option.BatchSize) + } + args = append(args, option.SourcePath) + + return args +} + +func Pack(option PackOption) error { + if option.OCIRef { + return packRef(option) + } + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + args := buildPackArgs(option) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if isSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} + +func packRef(option PackOption) error { + args := []string{ + "create", + "--log-level", + "warn", + "--type", + "targz-ref", + "--blob-inline-meta", + "--features", + "blob-toc", + "--blob", + option.BlobPath, + } args = append(args, option.SourcePath) ctx := context.Background() @@ -98,15 +195,14 @@ func Pack(option PackOption) error { defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() cmd.Stderr = logger.Writer() - cmd.Stdin = strings.NewReader(option.PrefetchPatterns) if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) @@ -132,10 +228,26 @@ func Merge(option MergeOption) ([]digest.Digest, error) { if option.ChunkDictPath != "" { args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) } + if option.ParentBootstrapPath != "" { + args = append(args, "--parent-bootstrap", option.ParentBootstrapPath) + } if option.PrefetchPatterns == "" { option.PrefetchPatterns = "/" } args = append(args, option.SourceBootstrapPaths...) 
+ if len(option.RafsBlobDigests) > 0 { + args = append(args, "--blob-digests", strings.Join(option.RafsBlobDigests, ",")) + } + if len(option.RafsBlobTOCDigests) > 0 { + args = append(args, "--blob-toc-digests", strings.Join(option.RafsBlobTOCDigests, ",")) + } + if len(option.RafsBlobSizes) > 0 { + sizes := []string{} + for _, size := range option.RafsBlobSizes { + sizes = append(sizes, fmt.Sprintf("%d", size)) + } + args = append(args, "--blob-sizes", strings.Join(sizes, ",")) + } ctx := context.Background() var cancel context.CancelFunc @@ -143,7 +255,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { ctx, cancel = context.WithTimeout(ctx, *option.Timeout) defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() @@ -151,7 +263,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { cmd.Stdin = strings.NewReader(option.PrefetchPatterns) if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) @@ -159,7 +271,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { return nil, errors.Wrap(err, "run merge command") } - outputBytes, err := ioutil.ReadFile(option.OutputJSONPath) + outputBytes, err := os.ReadFile(option.OutputJSONPath) if err != nil { return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath) } @@ -187,7 +299,10 @@ func Unpack(option UnpackOption) error { "--output", option.TarPath, } - if option.BlobPath != "" { + + if option.BackendConfigPath != "" { + args = append(args, "--backend-config", option.BackendConfigPath) + } else if option.BlobPath != "" { args = append(args, "--blob", option.BlobPath) } @@ -198,14 +313,14 @@ func Unpack(option UnpackOption) error { defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() cmd.Stderr = logger.Writer() if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go new file mode 100644 index 0000000000..f20c11b73c --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2023. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package tool + +import ( + "context" + "os" + "os/exec" + "regexp" + "sync" + + "github.com/sirupsen/logrus" + "golang.org/x/mod/semver" +) + +const envNydusDisableTar2Rafs = "NYDUS_DISABLE_TAR2RAFS" + +var currentVersion string +var currentVersionDetectOnce sync.Once +var disableTar2Rafs = os.Getenv(envNydusDisableTar2Rafs) != "" + +const ( + // The option `--type tar-rafs` enables converting OCI tar blob + // stream into nydus blob directly, the tar2rafs eliminates the + // need to decompress it to a local directory first, thus greatly + // accelerating the pack process. + FeatureTar2Rafs Feature = "--type tar-rafs" +) + +var featureMap = map[Feature]string{ + FeatureTar2Rafs: "v2.2", +} + +type Feature string +type Features []Feature + +func (features *Features) Contains(feature Feature) bool { + for _, feat := range *features { + if feat == feature { + return true + } + } + return false +} + +func (features *Features) Remove(feature Feature) { + found := -1 + for idx, feat := range *features { + if feat == feature { + found = idx + break + } + } + if found != -1 { + *features = append((*features)[:found], (*features)[found+1:]...) + } +} + +func detectVersion(msg []byte) string { + re := regexp.MustCompile(`Version:\s*v*(\d+.\d+.\d+)`) + matches := re.FindSubmatch(msg) + if len(matches) > 1 { + return string(matches[1]) + } + return "" +} + +// DetectFeatures returns supported feature list from required feature list. +func DetectFeatures(builder string, required Features) Features { + currentVersionDetectOnce.Do(func() { + if required.Contains(FeatureTar2Rafs) && disableTar2Rafs { + logrus.Warnf("the feature '%s' is disabled by env '%s'", FeatureTar2Rafs, envNydusDisableTar2Rafs) + } + + cmd := exec.CommandContext(context.Background(), builder, "--version") + output, err := cmd.Output() + if err != nil { + return + } + + currentVersion = detectVersion(output) + }) + + if currentVersion == "" { + return Features{} + } + + detectedFeatures := Features{} + for _, feature := range required { + requiredVersion := featureMap[feature] + if requiredVersion == "" { + detectedFeatures = append(detectedFeatures, feature) + continue + } + + // The feature is supported by current version + supported := semver.Compare(requiredVersion, "v"+currentVersion) <= 0 + if supported { + // It is an experimental feature, so we still provide an env + // variable to allow users to disable it. + if feature == FeatureTar2Rafs && disableTar2Rafs { + continue + } + detectedFeatures = append(detectedFeatures, feature) + } + } + + return detectedFeatures +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go index 9d0590a0c9..bbdea72ed2 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go @@ -8,24 +8,41 @@ package converter import ( "context" + "errors" + "fmt" + "strings" "time" "github.com/containerd/containerd/content" + "github.com/containerd/nydus-snapshotter/pkg/converter/tool" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Compressor = uint32 + +const ( + CompressorNone Compressor = 0x0001 + CompressorZstd Compressor = 0x0002 +) + +var ( + ErrNotFound = errors.New("data not found") ) type Layer struct { // Digest represents the hash of whole tar blob. 
Digest digest.Digest + // Digest represents the original OCI tar(.gz) blob. + OriginalDigest *digest.Digest // ReaderAt holds the reader of whole tar blob. ReaderAt content.ReaderAt } -// Backend uploads blobs generated by nydus-image builder to a backend storage such as: -// - oss: A object storage backend, which uses its SDK to upload blob file. +// Backend uploads blobs generated by nydus-image builder to a backend storage. type Backend interface { // Push pushes specified blob file to remote storage backend. - Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error + Push(ctx context.Context, cs content.Store, desc ocispec.Descriptor) error // Check checks whether a blob exists in remote storage backend, // blob exists -> return (blobPath, nil) // blob not exists -> return ("", err) @@ -40,7 +57,7 @@ type PackOption struct { // BuilderPath holds the path of `nydus-image` binary tool. BuilderPath string // FsVersion specifies nydus RAFS format version, possible - // values: `5`, `6` (EROFS-compatible), default is `5`. + // values: `5`, `6` (EROFS-compatible), default is `6`. FsVersion string // ChunkDictPath holds the bootstrap path of chunk dict image. ChunkDictPath string @@ -48,10 +65,22 @@ type PackOption struct { PrefetchPatterns string // Compressor specifies nydus blob compression algorithm. Compressor string + // OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob. + OCIRef bool + // AlignedChunk aligns uncompressed data chunks to 4K, only for RAFS V5. + AlignedChunk bool + // ChunkSize sets the size of data chunks, must be power of two and between 0x1000-0x1000000. + ChunkSize string + // BacthSize sets the size of batch data chunks, must be power of two and between 0x1000-0x1000000 or zero. + BatchSize string // Backend uploads blobs generated by nydus-image builder to a backend storage. Backend Backend // Timeout cancels execution once exceed the specified time. Timeout *time.Duration + + // Features keeps a feature list supported by newer version of builder, + // It is detected automatically, so don't export it. + features tool.Features } type MergeOption struct { @@ -60,14 +89,20 @@ type MergeOption struct { // BuilderPath holds the path of `nydus-image` binary tool. BuilderPath string // FsVersion specifies nydus RAFS format version, possible - // values: `5`, `6` (EROFS-compatible), default is `5`. + // values: `5`, `6` (EROFS-compatible), default is `6`. FsVersion string // ChunkDictPath holds the bootstrap path of chunk dict image. ChunkDictPath string + // ParentBootstrapPath holds the bootstrap path of parent image. + ParentBootstrapPath string // PrefetchPatterns holds file path pattern list want to prefetch. PrefetchPatterns string // WithTar puts bootstrap into a tar stream (no gzip). WithTar bool + // OCI converts docker media types to OCI media types. + OCI bool + // OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob. + OCIRef bool // Backend uploads blobs generated by nydus-image builder to a backend storage. Backend Backend // Timeout cancels execution once exceed the specified time. @@ -81,4 +116,62 @@ type UnpackOption struct { BuilderPath string // Timeout cancels execution once exceed the specified time. Timeout *time.Duration + // Stream enables streaming mode, which doesn't unpack the blob data to disk, + // but setup a http server to serve the blob data. 
+ Stream bool +} + +type TOCEntry struct { + // Feature flags of entry + Flags uint32 + Reserved1 uint32 + // Name of entry data + Name [16]byte + // Sha256 of uncompressed entry data + UncompressedDigest [32]byte + // Offset of compressed entry data + CompressedOffset uint64 + // Size of compressed entry data + CompressedSize uint64 + // Size of uncompressed entry data + UncompressedSize uint64 + Reserved2 [44]byte +} + +func (entry *TOCEntry) GetCompressor() (Compressor, error) { + switch { + case entry.Flags&CompressorNone == CompressorNone: + return CompressorNone, nil + case entry.Flags&CompressorZstd == CompressorZstd: + return CompressorZstd, nil + } + return 0, fmt.Errorf("unsupported compressor, entry flags %x", entry.Flags) +} + +func (entry *TOCEntry) GetName() string { + var name strings.Builder + name.Grow(16) + for _, c := range entry.Name { + if c == 0 { + break + } + fmt.Fprintf(&name, "%c", c) + } + return name.String() +} + +func (entry *TOCEntry) GetUncompressedDigest() string { + return fmt.Sprintf("%x", entry.UncompressedDigest) +} + +func (entry *TOCEntry) GetCompressedOffset() uint64 { + return entry.CompressedOffset +} + +func (entry *TOCEntry) GetCompressedSize() uint64 { + return entry.CompressedSize +} + +func (entry *TOCEntry) GetUncompressedSize() uint64 { + return entry.UncompressedSize } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go index 849d870b34..b0b04d8b5f 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go @@ -59,18 +59,20 @@ type seekReader struct { func (ra *seekReader) Read(p []byte) (int, error) { n, err := ra.ReaderAt.ReadAt(p, ra.pos) - ra.pos += int64(len(p)) + ra.pos += int64(n) return n, err } func (ra *seekReader) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent { + switch { + case whence == io.SeekCurrent: ra.pos += offset - } else if whence == io.SeekStart { + case whence == io.SeekStart: ra.pos = offset - } else { + default: return 0, fmt.Errorf("unsupported whence %d", whence) } + return ra.pos, nil } @@ -126,11 +128,12 @@ func packToTar(src string, name string, compress bool) (io.ReadCloser, error) { var finalErr error // Return the first error encountered to the other end and ignore others. 
- if err != nil { + switch { + case err != nil: finalErr = err - } else if err1 != nil { + case err1 != nil: finalErr = err1 - } else if err2 != nil { + case err2 != nil: finalErr = err2 } @@ -168,6 +171,9 @@ func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec return nil, err } labels := info.Labels + if labels == nil { + labels = map[string]string{} + } b, err := content.ReadBlob(ctx, cs, desc) if err != nil { return nil, err diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go index 3bdf74cb9d..0676fcdedd 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go @@ -9,17 +9,19 @@ package errdefs import ( stderrors "errors" "net" - "strings" "syscall" + "github.com/containerd/containerd/errdefs" "github.com/pkg/errors" ) -const signalKilled = "signal: killed" - var ( - ErrAlreadyExists = errors.New("already exists") - ErrNotFound = errors.New("not found") + ErrAlreadyExists = errdefs.ErrAlreadyExists + ErrNotFound = errdefs.ErrNotFound + ErrInvalidArgument = errors.New("invalid argument") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented + ErrDeviceBusy = errors.New("device busy") // represents not supported and unimplemented ) // IsAlreadyExists returns true if the error is due to already exists @@ -32,11 +34,6 @@ func IsNotFound(err error) bool { return errors.Is(err, ErrNotFound) } -// IsSignalKilled returns true if the error is signal killed -func IsSignalKilled(err error) bool { - return strings.Contains(err.Error(), signalKilled) -} - // IsConnectionClosed returns true if error is due to connection closed // this is used when snapshotter closed by sig term func IsConnectionClosed(err error) bool { diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go new file mode 100644 index 0000000000..af9417bce0 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020. Ant Group. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package label + +import ( + snpkg "github.com/containerd/containerd/pkg/snapshotters" +) + +// For package compatibility, we still keep the old exported name here. +var AppendLabelsHandlerWrapper = snpkg.AppendInfoHandlerWrapper + +// For package compatibility, we still keep the old exported name here. +const ( + CRIImageRef = snpkg.TargetRefLabel + CRIImageLayers = snpkg.TargetImageLayersLabel + CRILayerDigest = snpkg.TargetLayerDigestLabel + CRIManifestDigest = snpkg.TargetManifestDigestLabel +) + +const ( + // Marker for remote snapshotter to handle the pull request. + // During image pull, the containerd client calls Prepare API with the label containerd.io/snapshot.ref. + // This is a containerd-defined label which contains ChainID that targets a committed snapshot that the + // client is trying to prepare. + TargetSnapshotRef = "containerd.io/snapshot.ref" + + // A bool flag to mark the blob as a Nydus data blob, set by image builders. + NydusDataLayer = "containerd.io/snapshot/nydus-blob" + // A bool flag to mark the blob as a nydus bootstrap, set by image builders. 
+ NydusMetaLayer = "containerd.io/snapshot/nydus-bootstrap" + // The referenced blob sha256 in format of `sha256:xxx`, set by image builders. + NydusRefLayer = "containerd.io/snapshot/nydus-ref" + // Annotation containing secret to pull images from registry, set by the snapshotter. + NydusImagePullSecret = "containerd.io/snapshot/pullsecret" + // Annotation containing username to pull images from registry, set by the snapshotter. + NydusImagePullUsername = "containerd.io/snapshot/pullusername" + // A bool flag to enable integrity verification of meta data blob + NydusSignature = "containerd.io/snapshot/nydus-signature" + + // A bool flag to mark the blob as a estargz data blob, set by the snapshotter. + StargzLayer = "containerd.io/snapshot/stargz" + + // volatileOpt is a key of an optional label to each snapshot. + // If this optional label of a snapshot is specified, when mounted to rootdir + // this snapshot will include volatile option + OverlayfsVolatileOpt = "containerd.io/snapshot/overlay.volatile" +) + +func IsNydusDataLayer(labels map[string]string) bool { + _, ok := labels[NydusDataLayer] + return ok +} + +func IsNydusMetaLayer(labels map[string]string) bool { + if labels == nil { + return false + } + _, ok := labels[NydusMetaLayer] + return ok +} diff --git a/vendor/github.com/containerd/typeurl/.gitignore b/vendor/github.com/containerd/typeurl/.gitignore deleted file mode 100644 index d53846778b..0000000000 --- a/vendor/github.com/containerd/typeurl/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -coverage.txt diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md deleted file mode 100644 index d021e96724..0000000000 --- a/vendor/github.com/containerd/typeurl/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of encoded types. - -This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd2053..0000000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go deleted file mode 100644 index 647d419a29..0000000000 --- a/vendor/github.com/containerd/typeurl/types.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -import ( - "encoding/json" - "path" - "reflect" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) -) - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. -var ( - ErrNotFound = errors.New("not found") -) - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. -// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. -func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) - ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(errors.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. -func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - // fallback to the proto registry if it is a proto message - pb, ok := v.(proto.Message) - if !ok { - return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v)) - } - return proto.MessageName(pb), nil - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any *types.Any, v interface{}) bool { - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.TypeUrl == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (*types.Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case *types.Any: - // avoid reserializing the type if we have an any. - return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - marshal = json.Marshal - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &types.Any{ - TypeUrl: url, - Value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any *types.Any) (interface{}, error) { - return UnmarshalByTypeURL(any.TypeUrl, any.Value) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. 
-func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any *types.Any, out interface{}) error { - return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out) -} - -// UnmarshalTo unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. -func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t.t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - if t.isProto { - err = proto.Unmarshal(value, v.(proto.Message)) - } else { - err = json.Unmarshal(value, v) - } - - return v, err -} - -type urlType struct { - t reflect.Type - isProto bool -} - -func getTypeByUrl(url string) (urlType, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return urlType{ - t: t, - }, nil - } - } - mu.RUnlock() - // fallback to proto registry - t := proto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } - return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/vendor/github.com/moby/buildkit/cache/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs.go index 716be90934..33e9693f19 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs.go +++ b/vendor/github.com/moby/buildkit/cache/blobs.go @@ -11,6 +11,7 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/winlayers" @@ -18,11 +19,11 @@ import ( imagespecidentity "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) -var g flightcontrol.Group +var g flightcontrol.Group[struct{}] +var gFileList flightcontrol.Group[[]string] const containerdUncompressed = "containerd.io/uncompressed" @@ -86,12 +87,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if _, ok := filter[sr.ID()]; ok { eg.Go(func() error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (struct{}, error) { if sr.getBlob() != "" { - 
return nil, nil + return struct{}{}, nil } if !createIfNeeded { - return nil, errors.WithStack(ErrNoBlobs) + return struct{}{}, errors.WithStack(ErrNoBlobs) } compressorFunc, finalize := comp.Type.Compress(ctx, comp) @@ -108,12 +109,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if lowerRef != nil { m, err := lowerRef.Mount(ctx, true, s) if err != nil { - return nil, err + return struct{}{}, err } var release func() error lower, release, err = m.Mount() if err != nil { - return nil, err + return struct{}{}, err } if release != nil { defer release() @@ -131,12 +132,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if upperRef != nil { m, err := upperRef.Mount(ctx, true, s) if err != nil { - return nil, err + return struct{}{}, err } var release func() error upper, release, err = m.Mount() if err != nil { - return nil, err + return struct{}{}, err } if release != nil { defer release() @@ -151,7 +152,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff { enableOverlay, err = strconv.ParseBool(forceOvlStr) if err != nil { - return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") + return struct{}{}, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") } fallback = false // prohibit fallback on debug } else if !isTypeWindows(sr) { @@ -173,14 +174,14 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if !ok || err != nil { if !fallback { if !ok { - return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) + return struct{}{}, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) } if err != nil { - return nil, errors.Wrapf(err, "failed to compute overlay diff") + return struct{}{}, errors.Wrapf(err, "failed to compute overlay diff") } } if logWarnOnErr { - logrus.Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err) + bklog.G(ctx).Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err) } } if ok { @@ -198,7 +199,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool diff.WithCompressor(compressorFunc), ) if err != nil { - logrus.WithError(err).Warnf("failed to compute blob by buildkit differ") + bklog.G(ctx).WithError(err).Warnf("failed to compute blob by buildkit differ") } } @@ -209,7 +210,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool diff.WithCompressor(compressorFunc), ) if err != nil { - return nil, err + return struct{}{}, err } } @@ -219,7 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if finalize != nil { a, err := finalize(ctx, sr.cm.ContentStore) if err != nil { - return nil, errors.Wrapf(err, "failed to finalize compression") + return struct{}{}, errors.Wrapf(err, "failed to finalize compression") } for k, v := range a { desc.Annotations[k] = v @@ -227,7 +228,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } info, err := sr.cm.ContentStore.Info(ctx, desc.Digest) if err != nil { - return nil, err + return struct{}{}, err } if diffID, ok := info.Labels[containerdUncompressed]; ok { @@ -235,13 +236,13 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } else if mediaType == ocispecs.MediaTypeImageLayer { 
desc.Annotations[containerdUncompressed] = desc.Digest.String() } else { - return nil, errors.Errorf("unknown layer compression type") + return struct{}{}, errors.Errorf("unknown layer compression type") } if err := sr.setBlob(ctx, desc); err != nil { - return nil, err + return struct{}{}, err } - return nil, nil + return struct{}{}, nil }) if err != nil { return err @@ -415,29 +416,29 @@ func isTypeWindows(sr *immutableRef) bool { // ensureCompression ensures the specified ref has the blob of the specified compression Type. func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (struct{}, error) { desc, err := ref.ociDesc(ctx, ref.descHandlers, true) if err != nil { - return nil, err + return struct{}{}, err } // Resolve converters layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp) if err != nil { - return nil, err + return struct{}{}, err } else if layerConvertFunc == nil { if isLazy, err := ref.isLazy(ctx); err != nil { - return nil, err + return struct{}{}, err } else if isLazy { // This ref can be used as the specified compressionType. Keep it lazy. - return nil, nil + return struct{}{}, nil } - return nil, ref.linkBlob(ctx, desc) + return struct{}{}, ref.linkBlob(ctx, desc) } // First, lookup local content store if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil { - return nil, nil // found the compression variant. no need to convert. + return struct{}{}, nil // found the compression variant. no need to convert. } // Convert layer compression type @@ -447,18 +448,18 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression. 
dh: ref.descHandlers[desc.Digest], session: s, }).Unlazy(ctx); err != nil { - return nil, err + return struct{}{}, err } newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to convert") + return struct{}{}, errors.Wrapf(err, "failed to convert") } // Start to track converted layer if err := ref.linkBlob(ctx, *newDesc); err != nil { - return nil, errors.Wrapf(err, "failed to add compression blob") + return struct{}{}, errors.Wrapf(err, "failed to add compression blob") } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/vendor/github.com/moby/buildkit/cache/compression_nydus.go b/vendor/github.com/moby/buildkit/cache/compression_nydus.go index 48b61a4b36..1b64430647 100644 --- a/vendor/github.com/moby/buildkit/cache/compression_nydus.go +++ b/vendor/github.com/moby/buildkit/cache/compression_nydus.go @@ -6,7 +6,6 @@ package cache import ( "compress/gzip" "context" - "encoding/json" "io" "github.com/containerd/containerd/content" @@ -18,13 +17,13 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" + "github.com/containerd/nydus-snapshotter/pkg/converter" ) func init() { additionalAnnotations = append( additionalAnnotations, - nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs, + converter.LayerAnnotationNydusBlob, converter.LayerAnnotationNydusBootstrap, ) } @@ -58,7 +57,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, // Extracts nydus bootstrap from nydus format for each layer. var cm *cacheManager - layers := []nydusify.Layer{} + layers := []converter.Layer{} blobIDs := []string{} for _, ref := range refs { blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) @@ -74,7 +73,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, cm = ref.cm } blobIDs = append(blobIDs, blobDesc.Digest.Hex()) - layers = append(layers, nydusify.Layer{ + layers = append(layers, converter.Layer{ Digest: blobDesc.Digest, ReaderAt: ra, }) @@ -84,7 +83,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, pr, pw := io.Pipe() go func() { defer pw.Close() - if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{ + if _, err := converter.Merge(ctx, layers, pw, converter.MergeOption{ WithTar: true, }); err != nil { pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) @@ -125,11 +124,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, return nil, errors.Wrap(err, "get info from content store") } - blobIDsBytes, err := json.Marshal(blobIDs) - if err != nil { - return nil, errors.Wrap(err, "marshal blob ids") - } - desc := ocispecs.Descriptor{ Digest: compressedDgst, Size: info.Size, @@ -137,9 +131,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, Annotations: map[string]string{ containerdUncompressed: uncompressedDgst.Digest().String(), // Use this annotation to identify nydus bootstrap layer. - nydusify.LayerAnnotationNydusBootstrap: "true", - // Track all blob digests for nydus snapshotter. 
- nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + converter.LayerAnnotationNydusBootstrap: "true", }, } diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go index dcf424a6b4..e0f58d57b3 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -110,7 +110,9 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md cache.RefMetadat cm.lruMu.Unlock() if ok { cm.locker.Unlock(md.ID()) + v.(*cacheContext).mu.Lock() // locking is required because multiple ImmutableRefs can reach this code; however none of them use the linkMap. v.(*cacheContext).linkMap = map[string][][]byte{} + v.(*cacheContext).mu.Unlock() return v.(*cacheContext), nil } cc, err := newCacheContext(md) diff --git a/vendor/github.com/moby/buildkit/cache/filelist.go b/vendor/github.com/moby/buildkit/cache/filelist.go index c2c7921fd5..0cb2e9b60a 100644 --- a/vendor/github.com/moby/buildkit/cache/filelist.go +++ b/vendor/github.com/moby/buildkit/cache/filelist.go @@ -20,7 +20,7 @@ const keyFileList = "filelist" // are in the tar stream (AUFS whiteout format). If the reference does not have a // a blob associated with it, the list is empty. func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) { - res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) { + return gFileList.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) ([]string, error) { dt, err := sr.GetExternal(keyFileList) if err == nil && dt != nil { var files []string @@ -80,11 +80,4 @@ func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string } return files, nil }) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return res.([]string), nil } diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go index d579a6007b..64322055ef 100644 --- a/vendor/github.com/moby/buildkit/cache/manager.go +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -27,7 +27,6 @@ import ( imagespecidentity "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -94,7 +93,7 @@ type cacheManager struct { mountPool sharableMountPool muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results - unlazyG flightcontrol.Group + unlazyG flightcontrol.Group[struct{}] } func NewManager(opt ManagerOpt) (Manager, error) { @@ -243,7 +242,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { - logrus.Errorf("failed to remove lease: %+v", err) + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) } } }() @@ -319,7 +318,7 @@ func (cm *cacheManager) init(ctx context.Context) error { for _, si := range items { if _, err := cm.getRecord(ctx, si.ID()); err != nil { - logrus.Debugf("could not load snapshot %s: %+v", si.ID(), err) + bklog.G(ctx).Debugf("could not load snapshot %s: %+v", si.ID(), err) cm.MetadataStore.Clear(si.ID()) cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()}) } @@ -597,7 +596,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr if err := 
cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { - logrus.Errorf("failed to remove lease: %+v", err) + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) } } }() @@ -1426,12 +1425,13 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) d.Size = 0 return nil } + defer ref.Release(context.TODO()) s, err := ref.size(ctx) if err != nil { return err } d.Size = s - return ref.Release(context.TODO()) + return nil }) }(d) } diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go index 82209a93c0..b223024dca 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -87,7 +87,7 @@ func (cm *cacheManager) Search(ctx context.Context, idx string) ([]RefMetadata, // callers must hold cm.mu lock func (cm *cacheManager) search(ctx context.Context, idx string) ([]RefMetadata, error) { - sis, err := cm.MetadataStore.Search(idx) + sis, err := cm.MetadataStore.Search(ctx, idx) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go index 170c0a8872..1240034a44 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -2,12 +2,13 @@ package metadata import ( "bytes" + "context" "encoding/json" "strings" "sync" + "github.com/moby/buildkit/util/bklog" "github.com/pkg/errors" - "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -80,7 +81,7 @@ func (s *Store) Probe(index string) (bool, error) { return exists, errors.WithStack(err) } -func (s *Store) Search(index string) ([]*StorageItem, error) { +func (s *Store) Search(ctx context.Context, index string) ([]*StorageItem, error) { var out []*StorageItem err := s.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(indexBucket)) @@ -100,7 +101,7 @@ func (s *Store) Search(index string) ([]*StorageItem, error) { k, _ = c.Next() b := main.Bucket([]byte(itemID)) if b == nil { - logrus.Errorf("index pointing to missing record %s", itemID) + bklog.G(ctx).Errorf("index pointing to missing record %s", itemID) continue } si, err := newStorageItem(itemID, b, s) diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go index 0af736ab70..e448f94b29 100644 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -27,6 +27,7 @@ import ( "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/overlay" "github.com/moby/buildkit/util/progress" rootlessmountopts "github.com/moby/buildkit/util/rootless/mountopts" "github.com/moby/buildkit/util/winlayers" @@ -89,7 +90,7 @@ type cacheRecord struct { mountCache snapshot.Mountable - sizeG flightcontrol.Group + sizeG flightcontrol.Group[int64] // these are filled if multiple refs point to same data equalMutable *mutableRef @@ -107,6 +108,7 @@ func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers, pg p progress: pg, } cr.refs[ref] = struct{}{} + bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref") return ref } @@ -118,6 +120,7 @@ func (cr *cacheRecord) mref(triggerLastUsed bool, descHandlers DescHandlers) *mu descHandlers: descHandlers, } cr.refs[ref] = struct{}{} + 
bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref") return ref } @@ -322,7 +325,7 @@ func (cr *cacheRecord) viewSnapshotID() string { func (cr *cacheRecord) size(ctx context.Context) (int64, error) { // this expects that usage() is implemented lazily - s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { + return cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (int64, error) { cr.mu.Lock() s := cr.getSize() if s != sizeUnknown { @@ -343,7 +346,7 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) { isDead := cr.isDead() cr.mu.Unlock() if isDead { - return int64(0), nil + return 0, nil } if !errors.Is(err, errdefs.ErrNotFound) { return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID()) @@ -376,10 +379,6 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) { cr.mu.Unlock() return usage.Size, nil }) - if err != nil { - return 0, err - } - return s.(int64), nil } // caller must hold cr.mu @@ -438,7 +437,19 @@ func (cr *cacheRecord) mount(ctx context.Context, s session.Group) (_ snapshot.M } // call when holding the manager lock -func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { +func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(map[string]any{ + "id": cr.ID(), + "refCount": len(cr.refs), + "removeSnapshot": removeSnapshot, + "stack": bklog.LazyStackTrace{}, + }) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("removed cache record") + }() delete(cr.cm.records, cr.ID()) if removeSnapshot { if err := cr.cm.LeaseManager.Delete(ctx, leases.Lease{ @@ -469,6 +480,24 @@ type immutableRef struct { progress progress.Controller } +// hold ref lock before calling +func (sr *immutableRef) traceLogFields() logrus.Fields { + m := map[string]any{ + "id": sr.ID(), + "refID": fmt.Sprintf("%p", sr), + "newRefCount": len(sr.refs), + "mutable": false, + "stack": bklog.LazyStackTrace{}, + } + if sr.equalMutable != nil { + m["equalMutableID"] = sr.equalMutable.ID() + } + if sr.equalImmutable != nil { + m["equalImmutableID"] = sr.equalImmutable.ID() + } + return m +} + // Order is from parent->child, sr will be at end of slice. Refs should not // be released as they are used internally in the underlying cacheRecords. func (sr *immutableRef) layerChain() []*immutableRef { @@ -591,6 +620,24 @@ type mutableRef struct { descHandlers DescHandlers } +// hold ref lock before calling +func (sr *mutableRef) traceLogFields() logrus.Fields { + m := map[string]any{ + "id": sr.ID(), + "refID": fmt.Sprintf("%p", sr), + "newRefCount": len(sr.refs), + "mutable": true, + "stack": bklog.LazyStackTrace{}, + } + if sr.equalMutable != nil { + m["equalMutableID"] = sr.equalMutable.ID() + } + if sr.equalImmutable != nil { + m["equalImmutableID"] = sr.equalImmutable.ID() + } + return m +} + func (sr *mutableRef) DescHandler(dgst digest.Digest) *DescHandler { return sr.descHandlers[dgst] } @@ -615,11 +662,11 @@ func layerToDistributable(mt string) string { } switch mt { - case ocispecs.MediaTypeImageLayerNonDistributable: + case ocispecs.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
return ocispecs.MediaTypeImageLayer - case ocispecs.MediaTypeImageLayerNonDistributableGzip: + case ocispecs.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return ocispecs.MediaTypeImageLayerGzip - case ocispecs.MediaTypeImageLayerNonDistributableZstd: + case ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return ocispecs.MediaTypeImageLayerZstd case images.MediaTypeDockerSchema2LayerForeign: return images.MediaTypeDockerSchema2Layer @@ -633,11 +680,11 @@ func layerToDistributable(mt string) string { func layerToNonDistributable(mt string) string { switch mt { case ocispecs.MediaTypeImageLayer: - return ocispecs.MediaTypeImageLayerNonDistributable + return ocispecs.MediaTypeImageLayerNonDistributable //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. case ocispecs.MediaTypeImageLayerGzip: - return ocispecs.MediaTypeImageLayerNonDistributableGzip + return ocispecs.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. case ocispecs.MediaTypeImageLayerZstd: - return ocispecs.MediaTypeImageLayerNonDistributableZstd + return ocispecs.MediaTypeImageLayerNonDistributableZstd //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. case images.MediaTypeDockerSchema2Layer: return images.MediaTypeDockerSchema2LayerForeign case images.MediaTypeDockerSchema2LayerForeignGzip: @@ -993,7 +1040,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, info.Labels[k] = "" // Remove labels appended in this call } if _, err := r.cm.Snapshotter.Update(ctx, info, flds...); err != nil { - logrus.Warn(errors.Wrapf(err, "failed to remove tmp remote labels")) + bklog.G(ctx).Warn(errors.Wrapf(err, "failed to remove tmp remote labels")) } }() @@ -1006,7 +1053,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, } func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) { + _, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ struct{}, rerr error) { dhs := sr.descHandlers for _, r := range sr.layerChain() { r := r @@ -1018,7 +1065,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s dh := dhs[digest.Digest(r.getBlob())] if dh == nil { // We cannot prepare remote snapshots without descHandler. - return nil, nil + return struct{}{}, nil } // tmpLabels contains dh.SnapshotLabels + session IDs. 
All keys contain @@ -1055,7 +1102,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s info.Labels[k] = "" } if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil { - logrus.Warn(errors.Wrapf(err, + bklog.G(ctx).Warn(errors.Wrapf(err, "failed to remove tmp remote labels after prepare")) } }() @@ -1070,7 +1117,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s break } - return nil, nil + return struct{}{}, nil }) return err } @@ -1093,18 +1140,18 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields } func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ interface{}, rerr error) { + _, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ struct{}, rerr error) { if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil { - return nil, nil + return struct{}{}, nil } switch sr.kind() { case Merge, Diff: - return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) + return struct{}{}, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) case Layer, BaseLayer: - return nil, sr.unlazyLayer(ctx, dhs, pg, s) + return struct{}{}, sr.unlazyLayer(ctx, dhs, pg, s) } - return nil, nil + return struct{}{}, nil }) return err } @@ -1294,9 +1341,16 @@ func (sr *immutableRef) updateLastUsedNow() bool { return true } -func (sr *immutableRef) release(ctx context.Context) error { - delete(sr.refs, sr) +func (sr *immutableRef) release(ctx context.Context) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(sr.traceLogFields()) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("released cache ref") + }() + delete(sr.refs, sr) if sr.updateLastUsedNow() { sr.updateLastUsed() if sr.equalMutable != nil { @@ -1363,7 +1417,7 @@ func (cr *cacheRecord) finalize(ctx context.Context) error { cr.cm.mu.Lock() defer cr.cm.mu.Unlock() if err := mutable.remove(context.TODO(), true); err != nil { - logrus.Error(err) + bklog.G(ctx).Error(err) } }() @@ -1476,8 +1530,16 @@ func (sr *mutableRef) Release(ctx context.Context) error { return sr.release(ctx) } -func (sr *mutableRef) release(ctx context.Context) error { +func (sr *mutableRef) release(ctx context.Context) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(sr.traceLogFields()) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("released cache ref") + }() delete(sr.refs, sr) + if !sr.HasCachePolicyRetain() { if sr.equalImmutable != nil { if sr.equalImmutable.HasCachePolicyRetain() { @@ -1514,7 +1576,7 @@ func (m *readOnlyMounter) Mount() ([]mount.Mount, func() error, error) { return nil, nil, err } for i, m := range mounts { - if m.Type == "overlay" { + if overlay.IsOverlayMountType(m) { mounts[i].Options = readonlyOverlay(m.Options) continue } @@ -1624,7 +1686,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er }() var isOverlay bool for _, m := range mounts { - if m.Type == "overlay" { + if overlay.IsOverlayMountType(m) { isOverlay = true break } diff --git a/vendor/github.com/moby/buildkit/cache/remote.go b/vendor/github.com/moby/buildkit/cache/remote.go index b80bd79cfb..cfafef4cb5 100644 --- a/vendor/github.com/moby/buildkit/cache/remote.go +++ b/vendor/github.com/moby/buildkit/cache/remote.go @@ -305,11 +305,11 @@ func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) } func (p 
lazyRefProvider) Unlazy(ctx context.Context) error { - _, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ interface{}, rerr error) { + _, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ struct{}, rerr error) { if isLazy, err := p.ref.isLazy(ctx); err != nil { - return nil, err + return struct{}{}, err } else if !isLazy { - return nil, nil + return struct{}{}, nil } defer func() { if rerr == nil { @@ -320,7 +320,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { if p.dh == nil { // shouldn't happen, if you have a lazy immutable ref it already should be validated // that descriptor handlers exist for it - return nil, errors.New("unexpected nil descriptor handler") + return struct{}{}, errors.New("unexpected nil descriptor handler") } if p.dh.Progress != nil { @@ -337,7 +337,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { Manager: p.ref.cm.ContentStore, }, p.desc, p.dh.Ref, logs.LoggerFromContext(ctx)) if err != nil { - return nil, err + return struct{}{}, err } if imageRefs := p.ref.getImageRefs(); len(imageRefs) > 0 { @@ -345,12 +345,12 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { imageRef := imageRefs[0] if p.ref.GetDescription() == "" { if err := p.ref.SetDescription("pulled from " + imageRef); err != nil { - return nil, err + return struct{}{}, err } } } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go index a0fd7ba7e2..fbb475132d 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -16,7 +16,7 @@ import ( "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress/logs" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" + "github.com/opencontainers/image-spec/specs-go" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -37,24 +37,135 @@ type Config struct { Compression compression.Config } +type CacheType int + const ( // ExportResponseManifestDesc is a key for the map returned from Exporter.Finalize. // The map value is a JSON string of an OCI desciptor of a manifest. 
ExporterResponseManifestDesc = "cache.manifest" ) -type contentCacheExporter struct { - solver.CacheExporterTarget - chains *v1.CacheChains - ingester content.Ingester - oci bool - ref string - comp compression.Config +const ( + NotSet CacheType = iota + ManifestList + ImageManifest +) + +func (data CacheType) String() string { + switch data { + case ManifestList: + return "Manifest List" + case ImageManifest: + return "Image Manifest" + default: + return "Not Set" + } } -func NewExporter(ingester content.Ingester, ref string, oci bool, compressionConfig compression.Config) Exporter { +func NewExporter(ingester content.Ingester, ref string, oci bool, imageManifest bool, compressionConfig compression.Config) Exporter { cc := v1.NewCacheChains() - return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig} + return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, imageManifest: imageManifest, ref: ref, comp: compressionConfig} +} + +type ExportableCache struct { + // This cache describes two distinct styles of exportable cache, one is an Index (or Manifest List) of blobs, + // or as an artifact using the OCI image manifest format. + ExportedManifest ocispecs.Manifest + ExportedIndex ocispecs.Index + CacheType CacheType + OCI bool +} + +func NewExportableCache(oci bool, imageManifest bool) (*ExportableCache, error) { + var mediaType string + + if imageManifest { + mediaType = ocispecs.MediaTypeImageManifest + if !oci { + return nil, errors.Errorf("invalid configuration for remote cache") + } + } else { + if oci { + mediaType = ocispecs.MediaTypeImageIndex + } else { + mediaType = images.MediaTypeDockerSchema2ManifestList + } + } + + cacheType := ManifestList + if imageManifest { + cacheType = ImageManifest + } + + schemaVersion := specs.Versioned{SchemaVersion: 2} + switch cacheType { + case ManifestList: + return &ExportableCache{ExportedIndex: ocispecs.Index{ + MediaType: mediaType, + Versioned: schemaVersion, + }, + CacheType: cacheType, + OCI: oci, + }, nil + case ImageManifest: + return &ExportableCache{ExportedManifest: ocispecs.Manifest{ + MediaType: mediaType, + Versioned: schemaVersion, + }, + CacheType: cacheType, + OCI: oci, + }, nil + default: + return nil, errors.Errorf("exportable cache type not set") + } +} + +func (ec *ExportableCache) MediaType() string { + if ec.CacheType == ManifestList { + return ec.ExportedIndex.MediaType + } + return ec.ExportedManifest.MediaType +} + +func (ec *ExportableCache) AddCacheBlob(blob ocispecs.Descriptor) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, blob) + } else { + ec.ExportedManifest.Layers = append(ec.ExportedManifest.Layers, blob) + } +} + +func (ec *ExportableCache) FinalizeCache(ctx context.Context) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedIndex.Manifests...) + } else { + ec.ExportedManifest.Layers = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedManifest.Layers...) 
+ } +} + +func (ec *ExportableCache) SetConfig(config ocispecs.Descriptor) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, config) + } else { + ec.ExportedManifest.Config = config + } +} + +func (ec *ExportableCache) MarshalJSON() ([]byte, error) { + if ec.CacheType == ManifestList { + return json.Marshal(ec.ExportedIndex) + } + return json.Marshal(ec.ExportedManifest) +} + +type contentCacheExporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains + ingester content.Ingester + oci bool + imageManifest bool + ref string + comp compression.Config } func (ce *contentCacheExporter) Name() string { @@ -74,21 +185,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, err } - // own type because oci type can't be pushed and docker type doesn't have annotations - type manifestList struct { - specs.Versioned - - MediaType string `json:"mediaType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []ocispecs.Descriptor `json:"manifests"` - } - - var mfst manifestList - mfst.SchemaVersion = 2 - mfst.MediaType = images.MediaTypeDockerSchema2ManifestList - if ce.oci { - mfst.MediaType = ocispecs.MediaTypeImageIndex + cache, err := NewExportableCache(ce.oci, ce.imageManifest) + if err != nil { + return nil, err } for _, l := range config.Layers { @@ -101,10 +200,10 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) - mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor) + cache.AddCacheBlob(dgstPair.Descriptor) } - mfst.Manifests = compression.ConvertAllLayerMediaTypes(ce.oci, mfst.Manifests...) + cache.FinalizeCache(ctx) dt, err := json.Marshal(config) if err != nil { @@ -122,9 +221,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string } configDone(nil) - mfst.Manifests = append(mfst.Manifests, desc) + cache.SetConfig(desc) - dt, err = json.Marshal(mfst) + dt, err = cache.MarshalJSON() if err != nil { return nil, errors.Wrap(err, "failed to marshal manifest") } @@ -133,9 +232,14 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string desc = ocispecs.Descriptor{ Digest: dgst, Size: int64(len(dt)), - MediaType: mfst.MediaType, + MediaType: cache.MediaType(), } - mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst)) + + mfstLog := fmt.Sprintf("writing cache manifest %s", dgst) + if ce.imageManifest { + mfstLog = fmt.Sprintf("writing cache image manifest %s", dgst) + } + mfstDone := progress.OneOff(ctx, mfstLog) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, mfstDone(errors.Wrap(err, "error writing manifest blob")) } @@ -145,5 +249,6 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string } res[ExporterResponseManifestDesc] = string(descJSON) mfstDone(nil) + return res, nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go index f36693d3b0..c24755e93d 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go @@ -15,6 +15,7 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" 
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/tracing" @@ -22,13 +23,12 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" actionscache "github.com/tonistiigi/go-actions-cache" "golang.org/x/sync/errgroup" ) func init() { - actionscache.Log = logrus.Debugf + actionscache.Log = bklog.L.Debugf } const ( @@ -92,7 +92,7 @@ func NewExporter(c *Config) (remotecache.Exporter, error) { } func (*exporter) Name() string { - return "exporting to GitHub cache" + return "exporting to GitHub Actions Cache" } func (ce *exporter) Config() remotecache.Config { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go index 6278090187..347d935e4a 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/import.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go @@ -3,6 +3,7 @@ package remotecache import ( "context" "encoding/json" + "fmt" "io" "sync" "time" @@ -12,12 +13,13 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -47,24 +49,52 @@ func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispecs.Descr return nil, err } - var mfst ocispecs.Index - if err := json.Unmarshal(dt, &mfst); err != nil { + manifestType, err := imageutil.DetectManifestBlobMediaType(dt) + if err != nil { return nil, err } - allLayers := v1.DescriptorProvider{} + layerDone := progress.OneOff(ctx, fmt.Sprintf("inferred cache manifest type: %s", manifestType)) + layerDone(nil) + allLayers := v1.DescriptorProvider{} var configDesc ocispecs.Descriptor - for _, m := range mfst.Manifests { - if m.MediaType == v1.CacheConfigMediaTypeV0 { - configDesc = m - continue + switch manifestType { + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + var mfst ocispecs.Index + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err } - allLayers[m.Digest] = v1.DescriptorProviderPair{ - Descriptor: m, - Provider: ci.provider, + + for _, m := range mfst.Manifests { + if m.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = m + continue + } + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } } + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: + var mfst ocispecs.Manifest + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + + if mfst.Config.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = mfst.Config + } + for _, m := range mfst.Layers { + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } + } + default: + err = errors.Wrapf(err, "unsupported or uninferrable manifest type") + return nil, err } if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok { @@ -162,7 +192,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte } if len(img.Rootfs.DiffIDs) != len(m.Layers) { - logrus.Warnf("invalid image with 
mismatching manifest and config") + bklog.G(ctx).Warnf("invalid image with mismatching manifest and config") return nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go index 036ec059f7..3b7b0c68d2 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go @@ -8,10 +8,10 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { @@ -85,7 +85,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) } if len(cfg.Layers) == 0 { - logrus.Warn("failed to match any cache with layers") + bklog.G(ctx).Warn("failed to match any cache with layers") return nil, nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go index 7f3d83b70f..818f9b441e 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -19,13 +19,19 @@ const ( attrDigest = "digest" attrSrc = "src" attrDest = "dest" + attrImageManifest = "image-manifest" attrOCIMediatypes = "oci-mediatypes" contentStoreIDPrefix = "local:" - attrLayerCompression = "compression" - attrForceCompression = "force-compression" - attrCompressionLevel = "compression-level" ) +type exporter struct { + remotecache.Exporter +} + +func (*exporter) Name() string { + return "exporting cache to client directory" +} + // ResolveCacheExporterFunc for "local" cache exporter. 
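As a hedged sketch of how the attributes handled below reach the local cache exporter (the destination path, session manager, and group are placeholders; compression attributes are now parsed centrally by compression.ParseAttributes):

	import (
		"context"

		"github.com/moby/buildkit/cache/remotecache"
		"github.com/moby/buildkit/cache/remotecache/local"
		"github.com/moby/buildkit/session"
	)

	func resolveLocalCacheExporter(ctx context.Context, sm *session.Manager, g session.Group) (remotecache.Exporter, error) {
		attrs := map[string]string{
			"dest":           "/tmp/buildkit-cache", // placeholder client-side directory
			"oci-mediatypes": "true",
			"image-manifest": "true", // new: emit a single OCI image manifest instead of an index
		}
		return local.ResolveCacheExporterFunc(sm)(ctx, g, attrs)
	}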
func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { @@ -33,7 +39,7 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor if store == "" { return nil, errors.New("local cache exporter requires dest") } - compressionConfig, err := attrsToCompression(attrs) + compressionConfig, err := compression.ParseAttributes(attrs) if err != nil { return nil, err } @@ -45,12 +51,20 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor } ociMediatypes = b } + imageManifest := false + if v, ok := attrs[attrImageManifest]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest) + } + imageManifest = b + } csID := contentStoreIDPrefix + store cs, err := getContentStore(ctx, sm, g, csID) if err != nil { return nil, err } - return remotecache.NewExporter(cs, "", ociMediatypes, *compressionConfig), nil + return &exporter{remotecache.NewExporter(cs, "", ociMediatypes, imageManifest, compressionConfig)}, nil } } @@ -109,38 +123,3 @@ type unlazyProvider struct { func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { return p.s } - -func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - var compressionType compression.Type - if v, ok := attrs[attrLayerCompression]; ok { - c, err := compression.Parse(v) - if err != nil { - return nil, err - } - compressionType = c - } else { - compressionType = compression.Default - } - compressionConfig := compression.New(compressionType) - if v, ok := attrs[attrForceCompression]; ok { - var force bool - if v == "" { - force = true - } else { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) - } - force = b - } - compressionConfig = compressionConfig.SetForce(force) - } - if v, ok := attrs[attrCompressionLevel]; ok { - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) - } - compressionConfig = compressionConfig.SetLevel(int(ii)) - } - return &compressionConfig, nil -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go index e3b32eb296..007da98855 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -15,34 +15,43 @@ import ( "github.com/moby/buildkit/util/estargz" "github.com/moby/buildkit/util/push" "github.com/moby/buildkit/util/resolver" + resolverconfig "github.com/moby/buildkit/util/resolver/config" "github.com/moby/buildkit/util/resolver/limited" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func canonicalizeRef(rawRef string) (string, error) { +func canonicalizeRef(rawRef string) (reference.Named, error) { if rawRef == "" { - return "", errors.New("missing ref") + return nil, errors.New("missing ref") } parsed, err := reference.ParseNormalizedNamed(rawRef) if err != nil { - return "", err + return nil, err } - return reference.TagNameOnly(parsed).String(), nil + parsed = reference.TagNameOnly(parsed) + return parsed, nil } const ( - attrRef = "ref" - 
attrOCIMediatypes = "oci-mediatypes" - attrLayerCompression = "compression" - attrForceCompression = "force-compression" - attrCompressionLevel = "compression-level" + attrRef = "ref" + attrImageManifest = "image-manifest" + attrOCIMediatypes = "oci-mediatypes" + attrInsecure = "registry.insecure" ) +type exporter struct { + remotecache.Exporter +} + +func (*exporter) Name() string { + return "exporting cache to registry" +} + func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { - compressionConfig, err := attrsToCompression(attrs) + compressionConfig, err := compression.ParseAttributes(attrs) if err != nil { return nil, err } @@ -50,6 +59,7 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r if err != nil { return nil, err } + refString := ref.String() ociMediatypes := true if v, ok := attrs[attrOCIMediatypes]; ok { b, err := strconv.ParseBool(v) @@ -58,12 +68,30 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r } ociMediatypes = b } - remote := resolver.DefaultPool.GetResolver(hosts, ref, "push", sm, g) - pusher, err := push.Pusher(ctx, remote, ref) + imageManifest := false + if v, ok := attrs[attrImageManifest]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest) + } + imageManifest = b + } + insecure := false + if v, ok := attrs[attrInsecure]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrInsecure) + } + insecure = b + } + + scope, hosts := registryConfig(hosts, ref, "push", insecure) + remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g) + pusher, err := push.Pusher(ctx, remote, refString) if err != nil { return nil, err } - return remotecache.NewExporter(contentutil.FromPusher(pusher), ref, ociMediatypes, *compressionConfig), nil + return &exporter{remotecache.NewExporter(contentutil.FromPusher(pusher), refString, ociMediatypes, imageManifest, compressionConfig)}, nil } } @@ -73,8 +101,19 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke if err != nil { return nil, ocispecs.Descriptor{}, err } - remote := resolver.DefaultPool.GetResolver(hosts, ref, "pull", sm, g) - xref, desc, err := remote.Resolve(ctx, ref) + refString := ref.String() + insecure := false + if v, ok := attrs[attrInsecure]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, ocispecs.Descriptor{}, errors.Wrapf(err, "failed to parse %s", attrInsecure) + } + insecure = b + } + + scope, hosts := registryConfig(hosts, ref, "pull", insecure) + remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g) + xref, desc, err := remote.Resolve(ctx, refString) if err != nil { return nil, ocispecs.Descriptor{}, err } @@ -83,8 +122,8 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke return nil, ocispecs.Descriptor{}, err } src := &withDistributionSourceLabel{ - Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, ref)), - ref: ref, + Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, refString)), + ref: refString, source: cs, } return remotecache.NewImporter(src), desc, nil @@ -130,37 +169,17 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript return labels } -func 
attrsToCompression(attrs map[string]string) (*compression.Config, error) { - var compressionType compression.Type - if v, ok := attrs[attrLayerCompression]; ok { - c, err := compression.Parse(v) - if err != nil { - return nil, err - } - compressionType = c - } else { - compressionType = compression.Default +func registryConfig(hosts docker.RegistryHosts, ref reference.Named, scope string, insecure bool) (string, docker.RegistryHosts) { + if insecure { + insecureTrue := true + httpTrue := true + hosts = resolver.NewRegistryConfig(map[string]resolverconfig.RegistryConfig{ + reference.Domain(ref): { + Insecure: &insecureTrue, + PlainHTTP: &httpTrue, + }, + }) + scope += ":insecure" } - compressionConfig := compression.New(compressionType) - if v, ok := attrs[attrForceCompression]; ok { - var force bool - if v == "" { - force = true - } else { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) - } - force = b - } - compressionConfig = compressionConfig.SetForce(force) - } - if v, ok := attrs[attrCompressionLevel]; ok { - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) - } - compressionConfig = compressionConfig.SetLevel(int(ii)) - } - return &compressionConfig, nil + return scope, hosts } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go index a4f7f6ad05..004fac0521 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -291,7 +291,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR return nil, errors.WithStack(solver.ErrNotFound) } -func (cs *cacheResultStorage) Exists(id string) bool { +func (cs *cacheResultStorage) Exists(ctx context.Context, id string) bool { return cs.byResultID(id) != nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go index 8c8bbde5dc..11ea24b865 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -39,7 +39,7 @@ func (c *CacheChains) Visited(v interface{}) bool { return ok } -func (c *CacheChains) normalize() error { +func (c *CacheChains) normalize(ctx context.Context) error { st := &normalizeState{ added: map[*item]*item{}, links: map[*item]map[nlink]map[digest.Digest]struct{}{}, @@ -66,7 +66,7 @@ func (c *CacheChains) normalize() error { } } - st.removeLoops() + st.removeLoops(ctx) items := make([]*item, 0, len(st.byKey)) for _, it := range st.byKey { @@ -77,7 +77,7 @@ func (c *CacheChains) normalize() error { } func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) { - if err := c.normalize(); err != nil { + if err := c.normalize(ctx); err != nil { return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go index f7139035fa..213e670a61 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -6,10 +6,10 @@ import ( "sort" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" digest 
"github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // sortConfig sorts the config structure to make sure it is deterministic @@ -128,7 +128,7 @@ type normalizeState struct { next int } -func (s *normalizeState) removeLoops() { +func (s *normalizeState) removeLoops(ctx context.Context) { roots := []digest.Digest{} for dgst, it := range s.byKey { if len(it.links) == 0 { @@ -139,11 +139,11 @@ func (s *normalizeState) removeLoops() { visited := map[digest.Digest]struct{}{} for _, d := range roots { - s.checkLoops(d, visited) + s.checkLoops(ctx, d, visited) } } -func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]struct{}) { +func (s *normalizeState) checkLoops(ctx context.Context, d digest.Digest, visited map[digest.Digest]struct{}) { it, ok := s.byKey[d] if !ok { return @@ -165,11 +165,11 @@ func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]s continue } if !it2.removeLink(it) { - logrus.Warnf("failed to remove looping cache key %s %s", d, id) + bklog.G(ctx).Warnf("failed to remove looping cache key %s %s", d, id) } delete(links[l], id) } else { - s.checkLoops(id, visited) + s.checkLoops(ctx, id, visited) } } } diff --git a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go index e90ed45f77..945e017168 100644 --- a/vendor/github.com/moby/buildkit/cache/util/fsutil.go +++ b/vendor/github.com/moby/buildkit/cache/util/fsutil.go @@ -57,21 +57,25 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ return errors.WithStack(err) } - if req.Range == nil { - dt, err = os.ReadFile(fp) - if err != nil { - return errors.WithStack(err) - } - } else { - f, err := os.Open(fp) - if err != nil { - return errors.WithStack(err) - } - dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) - f.Close() - if err != nil { - return errors.WithStack(err) + f, err := os.Open(fp) + if err != nil { + // The filename here is internal to the mount, so we can restore + // the request base path for error reporting. + // See os.DirFS.Open for details. 
+ if pe, ok := err.(*os.PathError); ok { + pe.Path = req.Filename } + return errors.WithStack(err) + } + defer f.Close() + + var rdr io.Reader = f + if req.Range != nil { + rdr = io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)) + } + dt, err = io.ReadAll(rdr) + if err != nil { + return errors.WithStack(err) } return nil }) diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go index deac2507a9..1d60a70683 100644 --- a/vendor/github.com/moby/buildkit/client/client.go +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -11,7 +11,6 @@ import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/defaults" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/connhelper" "github.com/moby/buildkit/session" @@ -26,6 +25,7 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -35,7 +35,9 @@ type Client struct { sessionDialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) } -type ClientOpt interface{} +type ClientOpt interface { + isClientOpt() +} // New returns a new buildkit client. Address can be empty for the system-default address. func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { @@ -44,8 +46,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), } needDialer := true - needWithInsecure := true - tlsServerName := "" var unary []grpc.UnaryClientInterceptor var stream []grpc.StreamClientInterceptor @@ -54,19 +54,18 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error var tracerProvider trace.TracerProvider var tracerDelegate TracerDelegate var sessionDialer func(context.Context, string, map[string][]string) (net.Conn, error) + var customDialOptions []grpc.DialOption + var creds *withCredentials for _, o := range opts { if _, ok := o.(*withFailFast); ok { gopts = append(gopts, grpc.FailOnNonTempDialError(true)) } if credInfo, ok := o.(*withCredentials); ok { - opt, err := loadCredentials(credInfo) - if err != nil { - return nil, err + if creds == nil { + creds = &withCredentials{} } - gopts = append(gopts, opt) - needWithInsecure = false - tlsServerName = credInfo.ServerName + creds = creds.merge(credInfo) } if wt, ok := o.(*withTracer); ok { customTracer = true @@ -82,6 +81,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error if sd, ok := o.(*withSessionDialer); ok { sessionDialer = sd.dialer } + if opt, ok := o.(*withGRPCDialOption); ok { + customDialOptions = append(customDialOptions, opt.opt) + } + } + + if creds == nil { + gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + credOpts, err := loadCredentials(creds) + if err != nil { + return nil, err + } + gopts = append(gopts, credOpts) } if !customTracer { @@ -103,9 +115,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error } gopts = append(gopts, grpc.WithContextDialer(dialFn)) } - if needWithInsecure { - gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } if address == 
"" { address = appdefaults.Address } @@ -117,7 +126,10 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error // ref: https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.3 // - However, when TLS specified, grpc-go requires it must match // with its servername specified for certificate validation. - authority := tlsServerName + var authority string + if creds != nil && creds.serverName != "" { + authority = creds.serverName + } if authority == "" { // authority as hostname from target address uri, err := url.Parse(address) @@ -131,17 +143,9 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error unary = append(unary, grpcerrors.UnaryClientInterceptor) stream = append(stream, grpcerrors.StreamClientInterceptor) - if len(unary) == 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0])) - } else if len(unary) > 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...))) - } - - if len(stream) == 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(stream[0])) - } else if len(stream) > 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...))) - } + gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary...)) + gopts = append(gopts, grpc.WithChainStreamInterceptor(stream...)) + gopts = append(gopts, customDialOptions...) conn, err := grpc.DialContext(ctx, address, gopts...) if err != nil { @@ -181,12 +185,27 @@ func (c *Client) Dialer() session.Dialer { return grpchijack.Dialer(c.ControlClient()) } +func (c *Client) Wait(ctx context.Context) error { + opts := []grpc.CallOption{grpc.WaitForReady(true)} + _, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}, opts...) + if err != nil { + if code := grpcerrors.Code(err); code == codes.Unimplemented { + // only buildkit v0.11+ supports the info api, but an unimplemented + // response error is still a response so we can ignore it + return nil + } + } + return err +} + func (c *Client) Close() error { return c.conn.Close() } type withFailFast struct{} +func (*withFailFast) isClientOpt() {} + func WithFailFast() ClientOpt { return &withFailFast{} } @@ -195,50 +214,115 @@ type withDialer struct { dialer func(context.Context, string) (net.Conn, error) } +func (*withDialer) isClientOpt() {} + func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt { return &withDialer{dialer: df} } type withCredentials struct { - ServerName string - CACert string - Cert string - Key string + // server options + serverName string + caCert string + caCertSystem bool + + // client options + cert string + key string } +func (opts *withCredentials) merge(opts2 *withCredentials) *withCredentials { + result := *opts + if opts2 == nil { + return &result + } + + // server options + if opts2.serverName != "" { + result.serverName = opts2.serverName + } + if opts2.caCert != "" { + result.caCert = opts2.caCert + } + if opts2.caCertSystem { + result.caCertSystem = opts2.caCertSystem + } + + // client options + if opts2.cert != "" { + result.cert = opts2.cert + } + if opts2.key != "" { + result.key = opts2.key + } + + return &result +} + +func (*withCredentials) isClientOpt() {} + // WithCredentials configures the TLS parameters of the client. 
// Arguments: -// * serverName: specifies the name of the target server -// * ca: specifies the filepath of the CA certificate to use for verification -// * cert: specifies the filepath of the client certificate -// * key: specifies the filepath of the client key -func WithCredentials(serverName, ca, cert, key string) ClientOpt { - return &withCredentials{serverName, ca, cert, key} +// * cert: specifies the filepath of the client certificate +// * key: specifies the filepath of the client key +func WithCredentials(cert, key string) ClientOpt { + return &withCredentials{ + cert: cert, + key: key, + } +} + +// WithServerConfig configures the TLS parameters to connect to the server. +// Arguments: +// * serverName: specifies the server name to verify the hostname +// * caCert: specifies the filepath of the CA certificate +func WithServerConfig(serverName, caCert string) ClientOpt { + return &withCredentials{ + serverName: serverName, + caCert: caCert, + } +} + +// WithServerConfigSystem configures the TLS parameters to connect to the +// server, using the system's certificate pool. +func WithServerConfigSystem(serverName string) ClientOpt { + return &withCredentials{ + serverName: serverName, + caCertSystem: true, + } } func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := os.ReadFile(opts.CACert) - if err != nil { - return nil, errors.Wrap(err, "could not read ca certificate") + cfg := &tls.Config{} + + if opts.caCertSystem { + cfg.RootCAs, _ = x509.SystemCertPool() + } + if cfg.RootCAs == nil { + cfg.RootCAs = x509.NewCertPool() } - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, errors.New("failed to append ca certs") + if opts.caCert != "" { + ca, err := os.ReadFile(opts.caCert) + if err != nil { + return nil, errors.Wrap(err, "could not read ca certificate") + } + if ok := cfg.RootCAs.AppendCertsFromPEM(ca); !ok { + return nil, errors.New("failed to append ca certs") + } } - cfg := &tls.Config{ - ServerName: opts.ServerName, - RootCAs: certPool, + if opts.serverName != "" { + cfg.ServerName = opts.serverName } // we will produce an error if the user forgot about either cert or key if at least one is specified - if opts.Cert != "" || opts.Key != "" { - cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) + if opts.cert != "" || opts.key != "" { + cert, err := tls.LoadX509KeyPair(opts.cert, opts.key) if err != nil { return nil, errors.Wrap(err, "could not read certificate/key") } - cfg.Certificates = []tls.Certificate{cert} + cfg.Certificates = append(cfg.Certificates, cert) } return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil @@ -252,6 +336,8 @@ type withTracer struct { tp trace.TracerProvider } +func (w *withTracer) isClientOpt() {} + type TracerDelegate interface { SetSpanExporter(context.Context, sdktrace.SpanExporter) error } @@ -266,6 +352,8 @@ type withTracerDelegate struct { TracerDelegate } +func (w *withTracerDelegate) isClientOpt() {} + func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) ClientOpt { return &withSessionDialer{dialer} } @@ -274,6 +362,8 @@ type withSessionDialer struct { dialer func(context.Context, string, map[string][]string) (net.Conn, error) } +func (w *withSessionDialer) isClientOpt() {} + func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { ch, err := connhelper.GetConnectionHelper(address) if err != nil { @@ -294,3 +384,13 @@ func filterInterceptor(intercept 
grpc.UnaryClientInterceptor) grpc.UnaryClientIn return intercept(ctx, method, req, reply, cc, invoker, opts...) } } + +type withGRPCDialOption struct { + opt grpc.DialOption +} + +func (*withGRPCDialOption) isClientOpt() {} + +func WithGRPCDialOption(opt grpc.DialOption) ClientOpt { + return &withGRPCDialOption{opt} +} diff --git a/vendor/github.com/moby/buildkit/client/llb/async.go b/vendor/github.com/moby/buildkit/client/llb/async.go index 73d2a92fa1..8771c71978 100644 --- a/vendor/github.com/moby/buildkit/client/llb/async.go +++ b/vendor/github.com/moby/buildkit/client/llb/async.go @@ -15,7 +15,7 @@ type asyncState struct { target State set bool err error - g flightcontrol.Group + g flightcontrol.Group[State] } func (as *asyncState) Output() Output { @@ -53,7 +53,7 @@ func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, e } func (as *asyncState) Do(ctx context.Context, c *Constraints) error { - _, err := as.g.Do(ctx, "", func(ctx context.Context) (interface{}, error) { + _, err := as.g.Do(ctx, "", func(ctx context.Context) (State, error) { if as.set { return as.target, as.err } diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go index f92ee2d0ab..627accfebc 100644 --- a/vendor/github.com/moby/buildkit/client/llb/definition.go +++ b/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -24,7 +24,7 @@ type DefinitionOp struct { platforms map[digest.Digest]*ocispecs.Platform dgst digest.Digest index pb.OutputIndex - inputCache map[digest.Digest][]*DefinitionOp + inputCache *sync.Map // shared and written among DefinitionOps so avoid race on this map using sync.Map } // NewDefinitionOp returns a new operation from a marshalled definition. @@ -70,7 +70,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { state := NewState(op) st = &state } - sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data) + sourceMaps[i] = NewSourceMap(st, info.Filename, info.Language, info.Data) } for dgst, locs := range def.Source.Locations { @@ -101,7 +101,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { platforms: platforms, dgst: dgst, index: index, - inputCache: make(map[digest.Digest][]*DefinitionOp), + inputCache: new(sync.Map), }, nil } @@ -180,6 +180,18 @@ func (d *DefinitionOp) Output() Output { }} } +func (d *DefinitionOp) loadInputCache(dgst digest.Digest) ([]*DefinitionOp, bool) { + a, ok := d.inputCache.Load(dgst.String()) + if ok { + return a.([]*DefinitionOp), true + } + return nil, false +} + +func (d *DefinitionOp) storeInputCache(dgst digest.Digest, c []*DefinitionOp) { + d.inputCache.Store(dgst.String(), c) +} + func (d *DefinitionOp) Inputs() []Output { if d.dgst == "" { return nil @@ -195,7 +207,7 @@ func (d *DefinitionOp) Inputs() []Output { for _, input := range op.Inputs { var vtx *DefinitionOp d.mu.Lock() - if existingIndexes, ok := d.inputCache[input.Digest]; ok { + if existingIndexes, ok := d.loadInputCache(input.Digest); ok { if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil { vtx = existingIndexes[input.Index] } @@ -211,14 +223,14 @@ func (d *DefinitionOp) Inputs() []Output { inputCache: d.inputCache, sources: d.sources, } - existingIndexes := d.inputCache[input.Digest] + existingIndexes, _ := d.loadInputCache(input.Digest) indexDiff := int(input.Index) - len(existingIndexes) if indexDiff >= 0 { // make room in the slice for the new index being set existingIndexes = append(existingIndexes, 
make([]*DefinitionOp, indexDiff+1)...) } existingIndexes[input.Index] = vtx - d.inputCache[input.Digest] = existingIndexes + d.storeInputCache(input.Digest, existingIndexes) } d.mu.Unlock() diff --git a/vendor/github.com/moby/buildkit/client/llb/diff.go b/vendor/github.com/moby/buildkit/client/llb/diff.go index b42fcbbcf4..1de2b6f04d 100644 --- a/vendor/github.com/moby/buildkit/client/llb/diff.go +++ b/vendor/github.com/moby/buildkit/client/llb/diff.go @@ -90,6 +90,8 @@ func (m *DiffOp) Inputs() (out []Output) { return out } +// Diff returns a state that represents the diff of the lower and upper states. +// The returned State is useful for use with [Merge] where you can merge the lower state with the diff. func Diff(lower, upper State, opts ...ConstraintsOpt) State { if lower.Output() == nil { if upper.Output() == nil { @@ -104,5 +106,5 @@ func Diff(lower, upper State, opts ...ConstraintsOpt) State { for _, o := range opts { o.SetConstraintsOption(&c) } - return NewState(NewDiff(lower, upper, c).Output()) + return lower.WithOutput(NewDiff(lower, upper, c).Output()) } diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 2b1d9bd3f1..0eed6774c2 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -339,7 +339,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] inputIndex = pb.Empty } - outputIndex := pb.OutputIndex(-1) + outputIndex := pb.SkipOutput if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs { outputIndex = pb.OutputIndex(outIndex) outIndex++ @@ -649,6 +649,7 @@ type SSHInfo struct { Optional bool } +// AddSecret is a RunOption that adds a secret to the exec. func AddSecret(dest string, opts ...SecretOption) RunOption { return runOptionFunc(func(ei *ExecInfo) { s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} @@ -696,6 +697,7 @@ func SecretAsEnv(v bool) SecretOption { }) } +// SecretFileOpt sets the secret's target file uid, gid and permissions. func SecretFileOpt(uid, gid, mode int) SecretOption { return secretOptionFunc(func(si *SecretInfo) { si.UID = uid @@ -704,12 +706,15 @@ func SecretFileOpt(uid, gid, mode int) SecretOption { }) } +// ReadonlyRootFS sets the execs's root filesystem to be read-only. func ReadonlyRootFS() RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.ReadonlyRootFS = true }) } +// WithProxy is a RunOption that sets the proxy environment variables in the resulting exec. +// For example `HTTP_PROXY` is a standard environment variable for unix systems that programs may read. func WithProxy(ps ProxyEnv) RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.ProxyEnv = &ps diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go index ffc6da19e4..7fc445c4c9 100644 --- a/vendor/github.com/moby/buildkit/client/llb/fileop.go +++ b/vendor/github.com/moby/buildkit/client/llb/fileop.go @@ -48,6 +48,7 @@ func NewFileOp(s State, action *FileAction, c Constraints) *FileOp { } // CopyInput is either llb.State or *FileActionWithState +// It is used by [Copy] to to specify the source of the copy operation. type CopyInput interface { isFileOpCopyInput() } @@ -60,6 +61,10 @@ type capAdder interface { addCaps(*FileOp) } +// FileAction is used to specify a file operation on a [State]. +// It can be used to create a directory, create a file, or remove a file, etc. 
+// This is used by [State.File] +// Typically a FileAction is created by calling one of the helper functions such as [Mkdir], [Copy], [Rm], [Mkfile] type FileAction struct { state *State prev *FileAction @@ -131,11 +136,16 @@ type fileActionWithState struct { func (fas *fileActionWithState) isFileOpCopyInput() {} +// Mkdir creates a FileAction which creates a directory at the given path. +// Example: +// +// llb.Scratch().File(llb.Mkdir("/foo", 0755)) func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { var mi MkdirInfo for _, o := range opt { o.SetMkdirOption(&mi) } + return &FileAction{ action: &fileActionMkdir{ file: p, @@ -181,6 +191,7 @@ func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) { var _ MkdirOption = &MkdirInfo{} +// WithParents is an option for Mkdir which creates parent directories if they do not exist. func WithParents(b bool) MkdirOption { return mkdirOptionFunc(func(mi *MkdirInfo) { mi.MakeParents = b @@ -282,6 +293,10 @@ func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}} } +// Mkfile creates a FileAction which creates a file at the given path with the provided contents. +// Example: +// +// llb.Scratch().File(llb.Mkfile("/foo", 0644, []byte("hello world!"))) func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { var mi MkfileInfo for _, o := range opts { @@ -332,6 +347,10 @@ func (a *fileActionMkfile) toProtoAction(ctx context.Context, parent string, bas }, nil } +// Rm creates a FileAction which removes a file or directory at the given path. +// Example: +// +// llb.Scratch().File(Mkfile("/foo", 0644, []byte("not around for long..."))).File(llb.Rm("/foo")) func Rm(p string, opts ...RmOption) *FileAction { var mi RmInfo for _, o := range opts { @@ -394,6 +413,25 @@ func (a *fileActionRm) toProtoAction(ctx context.Context, parent string, base pb }, nil } +// Copy produces a FileAction which copies a file or directory from the source to the destination. +// The "input" parameter is the contents to copy from. +// "src" is the path to copy from within the "input". +// "dest" is the path to copy to within the destination (the state being operated on). +// See [CopyInput] for the valid types of input. +// +// Example: +// +// st := llb.Local(".") +// llb.Scratch().File(llb.Copy(st, "/foo", "/bar")) +// +// The example copies the local (client) directory "./foo" to a new empty directory at /bar. +// +// Note: Copying directories can have different behavior based on if the destination exists or not. +// When the destination already exists, the contents of the source directory is copied underneath the destination, including the directory itself. +// You may need to supply a copy option to copy the dir contents only. +// You may also need to pass in a [CopyOption] which creates parent directories if they do not exist. +// +// See [CopyOption] for more details on what options are available. 
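Taken together, a small sketch combining the file actions documented in this file (paths and contents are illustrative):

	var st llb.State = llb.Scratch().
		File(llb.Mkdir("/app", 0755, llb.WithParents(true))).
		File(llb.Mkfile("/app/hello.txt", 0644, []byte("hello world!"))).
		File(llb.Copy(llb.Local("context"), "config.json", "/app/config.json"))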
func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { var state *State var fas *fileActionWithState @@ -410,7 +448,6 @@ func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { for _, o := range opts { o.SetCopyOption(&mi) } - return &FileAction{ action: &fileActionCopy{ state: state, @@ -486,22 +523,19 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) { p := path.Clean(a.src) + dir := "/" + var err error if !path.IsAbs(p) { if a.state != nil { - dir, err := a.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) + dir, err = a.state.GetDir(ctx) } else if a.fas != nil { - dir, err := a.fas.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) + dir, err = a.fas.state.GetDir(ctx) + } + if err != nil { + return "", err } } - return p, nil + return path.Join(dir, p), nil } func (a *fileActionCopy) addCaps(f *FileOp) { @@ -691,6 +725,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } pop, md := MarshalConstraints(c, &f.constraints) + pop.Platform = nil // file op is not platform specific pop.Op = &pb.Op_File{ File: pfo, } @@ -702,7 +737,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] pop.Inputs = state.inputs for i, st := range state.actions { - output := pb.OutputIndex(-1) + output := pb.SkipOutput if i+1 == len(state.actions) { output = 0 } diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go index 6dd40b6943..8a3a629954 100644 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -45,7 +45,6 @@ func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { headers.Set("User-Agent", version.UserAgent()) return &imageMetaResolver{ resolver: docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }), platform: opts.platform, @@ -71,11 +70,12 @@ type imageMetaResolver struct { } type resolveResult struct { + ref string config []byte dgst digest.Digest } -func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { +func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) { imr.locker.Lock(ref) defer imr.locker.Unlock(ref) @@ -87,16 +87,16 @@ func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string k := imr.key(ref, platform) if res, ok := imr.cache[k]; ok { - return res.dgst, res.config, nil + return res.ref, res.dgst, res.config, nil } - dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform) + ref, dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform, opt.SourcePolicies) if err != nil { - return "", nil, err + return "", "", nil, err } - imr.cache[k] = resolveResult{dgst: dgst, config: config} - return dgst, config, nil + imr.cache[k] = resolveResult{dgst: dgst, config: config, ref: ref} + return ref, dgst, config, nil } func (imr *imageMetaResolver) key(ref string, platform *ocispecs.Platform) string { diff --git a/vendor/github.com/moby/buildkit/client/llb/merge.go 
b/vendor/github.com/moby/buildkit/client/llb/merge.go index 8177d71d2a..ee5f653642 100644 --- a/vendor/github.com/moby/buildkit/client/llb/merge.go +++ b/vendor/github.com/moby/buildkit/client/llb/merge.go @@ -70,6 +70,31 @@ func (m *MergeOp) Inputs() []Output { return m.inputs } +// Merge merges multiple states into a single state. This is useful in +// conjunction with [Diff] to create set of patches which are independent of +// each other to a base state without affecting the cache of other merged +// states. +// As an example, lets say you have a rootfs with the following directories: +// +// / /bin /etc /opt /tmp +// +// Now lets say you want to copy a directory /etc/foo from one state and a +// binary /bin/bar from another state. +// [Copy] makes a duplicate of file on top of another directory. +// Merge creates a directory whose contents is an overlay of 2 states on top of each other. +// +// With "Merge" you can do this: +// +// fooState := Diff(rootfs, fooState) +// barState := Diff(rootfs, barState) +// +// Then merge the results with: +// +// Merge(rootfs, fooDiff, barDiff) +// +// The resulting state will have both /etc/foo and /bin/bar, but because Merge +// was used, changing the contents of "fooDiff" does not require copying +// "barDiff" again. func Merge(inputs []State, opts ...ConstraintsOpt) State { // filter out any scratch inputs, which have no effect when merged var filteredInputs []State @@ -92,5 +117,5 @@ func Merge(inputs []State, opts ...ConstraintsOpt) State { o.SetConstraintsOption(&c) } addCap(&c, pb.CapMergeOp) - return NewState(NewMerge(filteredInputs, c).Output()) + return filteredInputs[0].WithOutput(NewMerge(filteredInputs, c).Output()) } diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go index b98b6d1063..ab1021bd65 100644 --- a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -10,6 +10,7 @@ import ( "github.com/google/shlex" "github.com/moby/buildkit/solver/pb" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) type contextKeyT string @@ -29,10 +30,15 @@ var ( keySecurity = contextKeyT("llb.security") ) +// AddEnvf is the same as [AddEnv] but allows for a format string. +// This is the equivalent of `[State.AddEnvf]` func AddEnvf(key, value string, v ...interface{}) StateOption { return addEnvf(key, value, true, v...) } +// AddEnv returns a [StateOption] whichs adds an environment variable to the state. +// Use this with [State.With] to create a new state with the environment variable set. +// This is the equivalent of `[State.AddEnv]` func AddEnv(key, value string) StateOption { return addEnvf(key, value, false) } @@ -52,10 +58,14 @@ func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { } } +// Dir returns a [StateOption] sets the working directory for the state which will be used to resolve +// relative paths as well as the working directory for [State.Run]. +// See [State.With] for where to use this. func Dir(str string) StateOption { return dirf(str, false) } +// Dirf is the same as [Dir] but allows for a format string. func Dirf(str string, v ...interface{}) StateOption { return dirf(str, true, v...) 
} @@ -69,7 +79,7 @@ func dirf(value string, replace bool, v ...interface{}) StateOption { if !path.IsAbs(value) { prev, err := getDir(s)(ctx, c) if err != nil { - return nil, err + return nil, errors.Wrap(err, "getting dir from state") } if prev == "" { prev = "/" @@ -81,12 +91,18 @@ func dirf(value string, replace bool, v ...interface{}) StateOption { } } +// User returns a [StateOption] which sets the user for the state which will be used by [State.Run]. +// This is the equivalent of [State.User] +// See [State.With] for where to use this. func User(str string) StateOption { return func(s State) State { return s.WithValue(keyUser, str) } } +// Reset returns a [StateOption] which creates a new [State] with just the +// output of the current [State] and the provided [State] is set as the parent. +// This is the equivalent of [State.Reset] func Reset(other State) StateOption { return func(s State) State { s = NewState(s.Output()) @@ -147,6 +163,9 @@ func getUser(s State) func(context.Context, *Constraints) (string, error) { } } +// Hostname returns a [StateOption] which sets the hostname used for containers created by [State.Run]. +// This is the equivalent of [State.Hostname] +// See [State.With] for where to use this. func Hostname(str string) StateOption { return func(s State) State { return s.WithValue(keyHostname, str) @@ -283,6 +302,9 @@ func getCgroupParent(s State) func(context.Context, *Constraints) (string, error } } +// Network returns a [StateOption] which sets the network mode used for containers created by [State.Run]. +// This is the equivalent of [State.Network] +// See [State.With] for where to use this. func Network(v pb.NetMode) StateOption { return func(s State) State { return s.WithValue(keyNetwork, v) @@ -302,6 +324,9 @@ func getNetwork(s State) func(context.Context, *Constraints) (pb.NetMode, error) } } +// Security returns a [StateOption] which sets the security mode used for containers created by [State.Run]. +// This is the equivalent of [State.Security] +// See [State.With] for where to use this. 
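A brief sketch of applying these StateOptions through State.With, assuming the llb package as vendored here; the base image and values are illustrative:

	st := llb.Image("docker.io/library/alpine:3.18").With(
		llb.AddEnv("PATH", "/usr/local/bin:/usr/bin:/bin"),
		llb.Dir("/work"),
		llb.User("nobody"),
		llb.Hostname("builder"),
	)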
func Security(v pb.SecurityMode) StateOption { return func(s State) State { return s.WithValue(keySecurity, v) diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go index b3b9cdf751..02644f62c7 100644 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -3,6 +3,7 @@ package llb import ( "context" + spb "github.com/moby/buildkit/sourcepolicy/pb" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -31,7 +32,7 @@ func WithLayerLimit(l int) ImageOption { // ImageMetaResolver can resolve image config metadata from a reference type ImageMetaResolver interface { - ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error) } type ResolverType int @@ -49,6 +50,8 @@ type ResolveImageConfigOpt struct { LogName string Store ResolveImageConfigOptStore + + SourcePolicies []*spb.Policy } type ResolveImageConfigOptStore struct { diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go index 27c8c1b617..fa1096a67c 100644 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -91,6 +91,10 @@ func (s *SourceOp) Inputs() []Output { return nil } +// Image returns a state that represents a docker image in a registry. +// Example: +// +// st := llb.Image("busybox:latest") func Image(ref string, opts ...ImageOption) State { r, err := reference.ParseNormalizedNamed(ref) if err == nil { @@ -131,7 +135,7 @@ func Image(ref string, opts ...ImageOption) State { if p == nil { p = c.Platform } - _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ + _, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ Platform: p, ResolveMode: info.resolveMode.String(), ResolverType: ResolverTypeRegistry, @@ -147,7 +151,7 @@ func Image(ref string, opts ...ImageOption) State { if p == nil { p = c.Platform } - dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ + ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ Platform: p, ResolveMode: info.resolveMode.String(), ResolverType: ResolverTypeRegistry, @@ -155,6 +159,10 @@ func Image(ref string, opts ...ImageOption) State { if err != nil { return State{}, err } + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return State{}, err + } if dgst != "" { r, err = reference.WithDigest(r, dgst) if err != nil { @@ -215,6 +223,20 @@ type ImageInfo struct { RecordType string } +// Git returns a state that represents a git repository. +// Example: +// +// st := llb.Git("https://github.com/moby/buildkit.git#v0.11.6") +// +// The example fetches the v0.11.6 tag of the buildkit repository. +// You can also use a commit hash or a branch name. +// +// Other URL formats are supported such as "git@github.com:moby/buildkit.git", "git://...", "ssh://..." +// Formats that utilize SSH may need to supply credentials as a [GitOption]. +// You may need to check the source code for a full list of supported formats. +// +// By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded. 
+// Additionally the ".git" directory is removed after the clone, you can keep ith with the [KeepGitDir] [GitOption]. func Git(remote, ref string, opts ...GitOption) State { url := strings.Split(remote, "#")[0] @@ -346,10 +368,12 @@ func MountSSHSock(sshID string) GitOption { }) } +// Scratch returns a state that represents an empty filesystem. func Scratch() State { return NewState(nil) } +// Local returns a state that represents a directory local to the client. func Local(name string, opts ...LocalOption) State { gi := &LocalInfo{} diff --git a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go index 17cc1de6f5..721db3cebe 100644 --- a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go +++ b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go @@ -7,17 +7,30 @@ import ( digest "github.com/opencontainers/go-digest" ) +// SourceMap maps a source file/location to an LLB state/definition. +// SourceMaps are used to provide information for debugging and helpful error messages to the user. +// As an example, lets say you have a Dockerfile with the following content: +// +// FROM alpine +// RUN exit 1 +// +// When the "RUN" statement exits with a non-zero exit code buildkit will treat +// it as an error and is able to provide the user with a helpful error message +// pointing to exactly the line in the Dockerfile that caused the error. type SourceMap struct { State *State Definition *Definition Filename string - Data []byte + // Language should use names defined in https://github.com/github/linguist/blob/v7.24.1/lib/linguist/languages.yml + Language string + Data []byte } -func NewSourceMap(st *State, filename string, dt []byte) *SourceMap { +func NewSourceMap(st *State, filename string, lang string, dt []byte) *SourceMap { return &SourceMap{ State: st, Filename: filename, + Language: lang, Data: dt, } } @@ -82,6 +95,7 @@ func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt info := &pb.SourceInfo{ Data: m.Data, Filename: m.Filename, + Language: m.Language, } if def != nil { diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index 7d35f3be59..f15fad87ab 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -49,6 +49,12 @@ func NewState(o Output) State { return s } +// State represents all operations that must be done to produce a given output. +// States are immutable, and all operations return a new state linked to the previous one. +// State is the core type of the LLB API and is used to build a graph of operations. +// The graph is then marshaled into a definition that can be executed by a backend (such as buildkitd). +// +// Operations performed on a State are executed lazily after the entire state graph is marshalled and sent to the backend. type State struct { out Output prev *State @@ -123,6 +129,7 @@ func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { return s } +// Marshal marshals the state and all its parents into a [Definition]. func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition, error) { c := NewConstraints(append(s.opts, co...)...) def := &Definition{ @@ -208,10 +215,13 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect return def, nil } +// Validate validates the state. +// This validation, unlike most other operations on [State], is not lazily performed. 
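A short sketch of the new Language field on SourceMap, following the Dockerfile example in the comment above; the state, filename, and contents are placeholders:

	dockerfile := []byte("FROM alpine\nRUN exit 1\n")
	st := llb.Scratch() // placeholder state associated with the source file
	sm := llb.NewSourceMap(&st, "Dockerfile", "Dockerfile", dockerfile)
	_ = sm // attached to ops so build errors can point back to the Dockerfile line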
func (s State) Validate(ctx context.Context, c *Constraints) error { return s.Output().Vertex(ctx, c).Validate(ctx, c) } +// Output returns the output of the state. func (s State) Output() Output { if s.async != nil { return s.async.Output() @@ -219,6 +229,7 @@ func (s State) Output() Output { return s.out } +// WithOutput creats a new state with the output set to the given output. func (s State) WithOutput(o Output) State { prev := s s = State{ @@ -229,6 +240,7 @@ func (s State) WithOutput(o Output) State { return s } +// WithImageConfig adds the environment variables, working directory, and platform specified in the image config to the state. func (s State) WithImageConfig(c []byte) (State, error) { var img ocispecs.Image if err := json.Unmarshal(c, &img); err != nil { @@ -255,6 +267,12 @@ func (s State) WithImageConfig(c []byte) (State, error) { return s, nil } +// Run performs the command specified by the arguments within the contexst of the current [State]. +// The command is executed as a container with the [State]'s filesystem as the root filesystem. +// As such any command you run must be present in the [State]'s filesystem. +// Constraints such as [State.Ulimit], [State.ParentCgroup], [State.Network], etc. are applied to the container. +// +// Run is useful when none of the LLB ops are sufficient for the operation that you want to perform. func (s State) Run(ro ...RunOption) ExecState { ei := &ExecInfo{State: s} for _, o := range ro { @@ -273,6 +291,8 @@ func (s State) Run(ro ...RunOption) ExecState { } } +// File performs a file operation on the current state. +// See [FileAction] for details on the operations that can be performed. func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { var c Constraints for _, o := range opts { @@ -282,21 +302,29 @@ func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { return s.WithOutput(NewFileOp(s, a, c).Output()) } +// AddEnv returns a new [State] with the provided environment variable set. +// See [AddEnv] func (s State) AddEnv(key, value string) State { return AddEnv(key, value)(s) } +// AddEnvf is the same as [State.AddEnv] but with a format string. func (s State) AddEnvf(key, value string, v ...interface{}) State { return AddEnvf(key, value, v...)(s) } +// Dir returns a new [State] with the provided working directory set. +// See [Dir] func (s State) Dir(str string) State { return Dir(str)(s) } + +// Dirf is the same as [State.Dir] but with a format string. func (s State) Dirf(str string, v ...interface{}) State { return Dirf(str, v...)(s) } +// GetEnv returns the value of the environment variable with the provided key. func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (string, bool, error) { c := &Constraints{} for _, f := range co { @@ -310,6 +338,8 @@ func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (st return v, ok, nil } +// Env returns a new [State] with the provided environment variable set. +// See [Env] func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error) { c := &Constraints{} for _, f := range co { @@ -322,6 +352,7 @@ func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error) return env.ToArray(), nil } +// GetDir returns the current working directory for the state. 
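Putting the documented helpers together, a hedged end-to-end sketch (image reference and command are illustrative):

	import (
		"context"

		"github.com/moby/buildkit/client/llb"
	)

	func exampleDefinition(ctx context.Context) (*llb.Definition, error) {
		st := llb.Image("docker.io/library/alpine:3.18").
			AddEnv("FOO", "bar").
			Dir("/src").
			Run(llb.Shlex("sh -c 'echo $FOO'")).Root()
		// Marshal resolves the lazy graph into a Definition the client sends to buildkitd.
		return st.Marshal(ctx)
	}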
func (s State) GetDir(ctx context.Context, co ...ConstraintsOpt) (string, error) { c := &Constraints{} for _, f := range co { @@ -338,18 +369,28 @@ func (s State) GetArgs(ctx context.Context, co ...ConstraintsOpt) ([]string, err return getArgs(s)(ctx, c) } +// Reset is used to return a new [State] with all of the current state and the +// provided [State] as the parent. In effect, you can think of this as creating +// a new state with all the output from the current state but reparented to the +// provided state. See [Reset] for more details. func (s State) Reset(s2 State) State { return Reset(s2)(s) } +// User sets the user for this state. +// See [User] for more details. func (s State) User(v string) State { return User(v)(s) } +// Hostname sets the hostname for this state. +// See [Hostname] for more details. func (s State) Hostname(v string) State { return Hostname(v)(s) } +// GetHostname returns the hostname set on the state. +// See [Hostname] for more details. func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, error) { c := &Constraints{} for _, f := range co { @@ -358,10 +399,14 @@ func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, e return getHostname(s)(ctx, c) } +// Platform sets the platform for the state. Platforms are used to determine +// image variants to pull and run as well as the platform metadata to set on the +// image config. func (s State) Platform(p ocispecs.Platform) State { return platform(p)(s) } +// GetPlatform returns the platform for the state. func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs.Platform, error) { c := &Constraints{} for _, f := range co { @@ -370,10 +415,14 @@ func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs return getPlatform(s)(ctx, c) } +// Network sets the network mode for the state. +// Network modes are used by [State.Run] to determine the network mode used when running the container. +// Network modes are not applied to image configs. func (s State) Network(n pb.NetMode) State { return Network(n)(s) } +// GetNetwork returns the network mode for the state. func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode, error) { c := &Constraints{} for _, f := range co { @@ -381,10 +430,15 @@ func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode } return getNetwork(s)(ctx, c) } + +// Security sets the security mode for the state. +// Security modes are used by [State.Run] to determine the privileges that processes in the container will run with. +// Security modes are not applied to image configs. func (s State) Security(n pb.SecurityMode) State { return Security(n)(s) } +// GetSecurity returns the security mode for the state. func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.SecurityMode, error) { c := &Constraints{} for _, f := range co { @@ -393,6 +447,8 @@ func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.Securi return getSecurity(s)(ctx, c) } +// With applies [StateOption]s to the [State]. +// Each applied [StateOption] creates a new [State] object with the previous as its parent. func (s State) With(so ...StateOption) State { for _, o := range so { s = o(s) @@ -400,14 +456,23 @@ func (s State) With(so ...StateOption) State { return s } +// AddExtraHost adds a host name to IP mapping to any containers created from this state.
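// A sketch, not part of the vendored patch, of the platform, network, security, and
// With helpers documented above. It assumes the NetModeNone and SecurityModeSandbox
// convenience values exposed by this package; the chosen platform and modes are
// illustrative only.
package example

import (
	"github.com/moby/buildkit/client/llb"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func exampleConstraints() llb.State {
	// A StateOption, applied through With, returns a new State derived from its input.
	withArm64 := func(s llb.State) llb.State {
		return s.Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
	}

	return llb.Image("docker.io/library/alpine:latest").
		With(withArm64).
		Network(llb.NetModeNone).         // containers started by Run get no network
		Security(llb.SecurityModeSandbox) // default, non-privileged sandbox
}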
func (s State) AddExtraHost(host string, ip net.IP) State { return extraHost(host, ip)(s) } +// AddUlimit sets the hard and soft limits for the given ulimit. +// The ulimit is applied to containers created from this state. +// Ulimits are Linux-specific and only apply to containers created from this state, such as via `[State.Run]` +// Ulimits do not apply to image configs. func (s State) AddUlimit(name UlimitName, soft int64, hard int64) State { return ulimit(name, soft, hard)(s) } +// WithCgroupParent sets the parent cgroup for any containers created from this state. +// This is useful when you want to apply resource constraints to a group of containers. +// Cgroups are Linux-specific and only apply to containers created from this state, such as via `[State.Run]` +// Cgroups do not apply to image configs. func (s State) WithCgroupParent(cp string) State { return cgroupParent(cp)(s) } diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go index 3731ff36bb..156976f5dd 100644 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -20,15 +20,18 @@ const ( ) type StoreIndex struct { - indexPath string - lockPath string + indexPath string + lockPath string + layoutPath string } func NewStoreIndex(storePath string) StoreIndex { indexPath := path.Join(storePath, indexFile) + layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile) return StoreIndex{ - indexPath: indexPath, - lockPath: indexPath + lockFileSuffix, + indexPath: indexPath, + lockPath: indexPath + lockFileSuffix, + layoutPath: layoutPath, } } @@ -58,6 +61,7 @@ func (s StoreIndex) Read() (*ocispecs.Index, error) { } func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { + // lock the store to prevent concurrent access lock := flock.New(s.lockPath) locked, err := lock.TryLock() if err != nil { @@ -71,20 +75,33 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { os.RemoveAll(s.lockPath) }() - f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) + // create the oci-layout file + layout := ocispecs.ImageLayout{ + Version: ocispecs.ImageLayoutVersion, + } + layoutData, err := json.Marshal(layout) + if err != nil { + return err + } + if err := os.WriteFile(s.layoutPath, layoutData, 0644); err != nil { + return err + } + + // modify the index file + idxFile, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) if err != nil { return errors.Wrapf(err, "could not open %s", s.indexPath) } - defer f.Close() + defer idxFile.Close() var idx ocispecs.Index - b, err := io.ReadAll(f) + idxData, err := io.ReadAll(idxFile) if err != nil { return errors.Wrapf(err, "could not read %s", s.indexPath) } - if len(b) > 0 { - if err := json.Unmarshal(b, &idx); err != nil { - return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) + if len(idxData) > 0 { + if err := json.Unmarshal(idxData, &idx); err != nil { + return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(idxData)) } } @@ -92,15 +109,15 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { return err } - b, err = json.Marshal(idx) + idxData, err = json.Marshal(idx) if err != nil { return err } - if _, err = f.WriteAt(b, 0); err != nil { - return err + if _, err = idxFile.WriteAt(idxData, 0); err != nil { + return errors.Wrapf(err, "could not write %s", s.indexPath) } - if err = f.Truncate(int64(len(b))); err != nil { - return err +
if err = idxFile.Truncate(int64(len(idxData))); err != nil { + return errors.Wrapf(err, "could not truncate %s", s.indexPath) } return nil } diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go index 65183d61cd..22ff2031d4 100644 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -169,7 +169,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } if supportFile && supportDir { - return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type) + return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type) } if !supportFile && ex.Output != nil { return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go index 1734d5e156..a92588e53f 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -7,6 +7,7 @@ import ( // Config provides containerd configuration data for the server type Config struct { Debug bool `toml:"debug"` + Trace bool `toml:"trace"` // Root is the path to a directory where buildkit will store persistent data Root string `toml:"root"` @@ -47,7 +48,7 @@ type TLSConfig struct { type GCConfig struct { GC *bool `toml:"gc"` - GCKeepStorage int64 `toml:"gckeepstorage"` + GCKeepStorage DiskSpace `toml:"gckeepstorage"` GCPolicy []GCPolicy `toml:"gcpolicy"` } @@ -114,10 +115,10 @@ type ContainerdConfig struct { } type GCPolicy struct { - All bool `toml:"all"` - KeepBytes int64 `toml:"keepBytes"` - KeepDuration int64 `toml:"keepDuration"` - Filters []string `toml:"filters"` + All bool `toml:"all"` + KeepBytes DiskSpace `toml:"keepBytes"` + KeepDuration Duration `toml:"keepDuration"` + Filters []string `toml:"filters"` } type DNSConfig struct { @@ -127,6 +128,6 @@ type DNSConfig struct { } type HistoryConfig struct { - MaxAge int64 `toml:"maxAge"` - MaxEntries int64 `toml:"maxEntries"` + MaxAge Duration `toml:"maxAge"` + MaxEntries int64 `toml:"maxEntries"` } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go index 6f3f197893..4078cc6d59 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go @@ -1,21 +1,86 @@ package config +import ( + "encoding" + "strconv" + "strings" + "time" + + "github.com/docker/go-units" + "github.com/pkg/errors" +) + +type Duration struct { + time.Duration +} + +func (d *Duration) UnmarshalText(textb []byte) error { + text := stripQuotes(string(textb)) + if len(text) == 0 { + return nil + } + + if duration, err := time.ParseDuration(text); err == nil { + d.Duration = duration + return nil + } + + if i, err := strconv.ParseInt(text, 10, 64); err == nil { + d.Duration = time.Duration(i) * time.Second + return nil + } + + return errors.Errorf("invalid duration %s", text) +} + +var _ encoding.TextUnmarshaler = &Duration{} + +type DiskSpace struct { + Bytes int64 + Percentage int64 +} + +var _ encoding.TextUnmarshaler = &DiskSpace{} + +func (d *DiskSpace) UnmarshalText(textb []byte) error { + text := stripQuotes(string(textb)) + if len(text) == 0 { + return nil + } + + if text2 := strings.TrimSuffix(text, 
"%"); len(text2) < len(text) { + i, err := strconv.ParseInt(text2, 10, 64) + if err != nil { + return err + } + d.Percentage = i + return nil + } + + if i, err := units.RAMInBytes(text); err == nil { + d.Bytes = i + return nil + } + + return errors.Errorf("invalid disk space %s", text) +} + const defaultCap int64 = 2e9 // 2GB -func DefaultGCPolicy(p string, keep int64) []GCPolicy { - if keep == 0 { - keep = DetectDefaultGCCap(p) +func DefaultGCPolicy(keep DiskSpace) []GCPolicy { + if keep == (DiskSpace{}) { + keep = DetectDefaultGCCap() } return []GCPolicy{ // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days { Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"}, - KeepDuration: 48 * 3600, // 48h - KeepBytes: 512 * 1e6, // 512MB + KeepDuration: Duration{Duration: time.Duration(48) * time.Hour}, // 48h + KeepBytes: DiskSpace{Bytes: 512 * 1e6}, // 512MB }, // remove any data not used for 60 days { - KeepDuration: 60 * 24 * 3600, // 60d + KeepDuration: Duration{Duration: time.Duration(60) * 24 * time.Hour}, // 60d KeepBytes: keep, }, // keep the unshared build cache under cap @@ -29,3 +94,13 @@ func DefaultGCPolicy(p string, keep int64) []GCPolicy { }, } } + +func stripQuotes(s string) string { + if len(s) == 0 { + return s + } + if s[0] == '"' && s[len(s)-1] == '"' { + return s[1 : len(s)-1] + } + return s +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go index a2efe6f568..232a9ac336 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go @@ -7,12 +7,23 @@ import ( "syscall" ) -func DetectDefaultGCCap(root string) int64 { +func DetectDefaultGCCap() DiskSpace { + return DiskSpace{Percentage: 10} +} + +func (d DiskSpace) AsBytes(root string) int64 { + if d.Bytes != 0 { + return d.Bytes + } + if d.Percentage == 0 { + return 0 + } + var st syscall.Statfs_t if err := syscall.Statfs(root, &st); err != nil { return defaultCap } diskSize := int64(st.Bsize) * int64(st.Blocks) - avail := diskSize / 10 + avail := diskSize * d.Percentage / 100 return (avail/(1<<30) + 1) * 1e9 // round up } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go index 349fddbd51..55ce4dd772 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go @@ -3,6 +3,10 @@ package config -func DetectDefaultGCCap(root string) int64 { - return defaultCap +func DetectDefaultGCCap() DiskSpace { + return DiskSpace{Bytes: defaultCap} +} + +func (d DiskSpace) AsBytes(root string) int64 { + return d.Bytes } diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 2bd06db257..8a09b8ace3 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -10,7 +10,6 @@ import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/leases" "github.com/containerd/containerd/services/content/contentserver" "github.com/docker/distribution/reference" "github.com/mitchellh/hashstructure/v2" @@ -18,20 +17,24 @@ import ( 
apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/cmd/buildkitd/config" controlgateway "github.com/moby/buildkit/control/gateway" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/grpchijack" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/proc" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/throttle" "github.com/moby/buildkit/util/tracing/transform" "github.com/moby/buildkit/version" @@ -52,14 +55,14 @@ type Opt struct { SessionManager *session.Manager WorkerController *worker.Controller Frontends map[string]frontend.Frontend - CacheKeyStorage solver.CacheKeyStorage + CacheManager solver.CacheManager ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc Entitlements []string TraceCollector sdktrace.SpanExporter HistoryDB *bbolt.DB - LeaseManager leases.Manager - ContentStore content.Store + LeaseManager *leaseutil.Manager + ContentStore *containerdsnapshot.Store HistoryConfig *config.HistoryConfig } @@ -77,21 +80,22 @@ type Controller struct { // TODO: ControlService } func NewController(opt Opt) (*Controller, error) { - cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) - gatewayForwarder := controlgateway.NewGatewayForwarder() - hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ + hq, err := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ DB: opt.HistoryDB, LeaseManager: opt.LeaseManager, ContentStore: opt.ContentStore, CleanConfig: opt.HistoryConfig, }) + if err != nil { + return nil, errors.Wrap(err, "failed to create history queue") + } s, err := llbsolver.New(llbsolver.Opt{ WorkerController: opt.WorkerController, Frontends: opt.Frontends, - CacheManager: cache, + CacheManager: opt.CacheManager, CacheResolvers: opt.ResolveCacheImporterFuncs, GatewayForwarder: gatewayForwarder, SessionManager: opt.SessionManager, @@ -106,7 +110,7 @@ func NewController(opt Opt) (*Controller, error) { opt: opt, solver: s, history: hq, - cache: cache, + cache: opt.CacheManager, gatewayForwarder: gatewayForwarder, } c.throttledGC = throttle.After(time.Minute, c.gc) @@ -127,7 +131,7 @@ func (c *Controller) Register(server *grpc.Server) { c.gatewayForwarder.Register(server) tracev1.RegisterTraceServiceServer(server, c) - store := &roContentStore{c.opt.ContentStore} + store := &roContentStore{c.opt.ContentStore.WithFallbackNS(c.opt.ContentStore.Namespace() + "_history")} contentapi.RegisterContentServer(server, contentserver.New(store)) } @@ -170,7 +174,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr imageutil.CancelCacheLeases() } - ch := make(chan client.UsageInfo) + ch := make(chan client.UsageInfo, 32) eg, ctx := errgroup.WithContext(stream.Context()) workers, err := c.opt.WorkerController.List() @@ -182,9 +186,9 @@ func 
(c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr defer func() { if didPrune { if c, ok := c.cache.(interface { - ReleaseUnreferenced() error + ReleaseUnreferenced(context.Context) error }); ok { - if err := c.ReleaseUnreferenced(); err != nil { + if err := c.ReleaseUnreferenced(ctx); err != nil { bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err) } } @@ -212,6 +216,11 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr }) eg2.Go(func() error { + defer func() { + // drain channel on error + for range ch { + } + }() for r := range ch { didPrune = true if err := stream.Send(&controlapi.UsageRecord{ @@ -276,7 +285,7 @@ func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.Upd return &controlapi.UpdateBuildHistoryResponse{}, err } -func translateLegacySolveRequest(req *controlapi.SolveRequest) error { +func translateLegacySolveRequest(req *controlapi.SolveRequest) { // translates ExportRef and ExportAttrs to new Exports (v0.4.0) if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { ex := &controlapi.CacheOptionsEntry{ @@ -302,18 +311,13 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) error { req.Cache.Imports = append(req.Cache.Imports, im) } req.Cache.ImportRefsDeprecated = nil - return nil } func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { atomic.AddInt64(&c.buildCount, 1) defer atomic.AddInt64(&c.buildCount, -1) - // This method registers job ID in solver.Solve. Make sure there are no blocking calls before that might delay this. - - if err := translateLegacySolveRequest(req); err != nil { - return nil, err - } + translateLegacySolveRequest(req) defer func() { time.AfterFunc(time.Second, c.throttledGC) @@ -329,20 +333,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* // if SOURCE_DATE_EPOCH is set, enable it for the exporter if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok { - if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok { + if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok { if req.ExporterAttrs == nil { req.ExporterAttrs = make(map[string]string) } - req.ExporterAttrs[epoch.KeySourceDateEpoch] = v - } - } - - if v, ok := req.FrontendAttrs["build-arg:BUILDKIT_BUILDINFO"]; ok && v != "" { - if _, ok := req.ExporterAttrs["buildinfo"]; !ok { - if req.ExporterAttrs == nil { - req.ExporterAttrs = make(map[string]string) - } - req.ExporterAttrs["buildinfo"] = v + req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)] = v } } @@ -377,6 +372,10 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type) } + if exp.Exporter == nil { + bklog.G(ctx).Debugf("cache exporter resolver for %v returned nil, skipping exporter", e.Type) + continue + } if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) } else { @@ -416,14 +415,19 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src) } + ref = reference.TagNameOnly(ref) useCache := true if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" { // disable cache if cache is disabled for all stages useCache = false } - ref = 
reference.TagNameOnly(ref) - procs = append(procs, proc.SBOMProcessor(ref.String(), useCache)) + resolveMode := llb.ResolveModeDefault.String() + if v, ok := req.FrontendAttrs["image-resolve-mode"]; ok { + resolveMode = v + } + + procs = append(procs, proc.SBOMProcessor(ref.String(), useCache, resolveMode)) } if attrs, ok := attests["provenance"]; ok { @@ -462,6 +466,11 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con }) eg.Go(func() error { + defer func() { + // drain channel on error + for range ch { + } + }() for { ss, ok := <-ch if !ok { diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go index ac195c4315..fa578c6d48 100644 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go @@ -21,6 +21,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/snapshot" @@ -78,7 +79,7 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb } } -func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { if id == "" { id = identity.NewID() } @@ -105,12 +106,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig) if err != nil { - return err + return nil, err } hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname) if err != nil { - return err + return nil, err } if clean != nil { defer clean() @@ -118,12 +119,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M mountable, err := root.Src.Mount(ctx, false) if err != nil { - return err + return nil, err } rootMounts, release, err := mountable.Mount() if err != nil { - return err + return nil, err } if release != nil { defer release() @@ -132,14 +133,14 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M lm := snapshot.LocalMounterWithMounts(rootMounts) rootfsPath, err := lm.Mount() if err != nil { - return err + return nil, err } defer lm.Unmount() - defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)() + defer executor.MountStubsCleaner(ctx, rootfsPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User) if err != nil { - return err + return nil, err } identity := idtools.Identity{ @@ -149,21 +150,21 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M newp, err := fs.RootPath(rootfsPath, meta.Cwd) if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) + return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp) } if _, err := os.Stat(newp); err != nil { if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { - return 
errors.Wrapf(err, "failed to create working directory %s", newp) + return nil, errors.Wrapf(err, "failed to create working directory %s", newp) } } provider, ok := w.networkProviders[meta.NetMode] if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) + return nil, errors.Errorf("unknown network mode %s", meta.NetMode) } namespace, err := provider.New(ctx, meta.Hostname) if err != nil { - return err + return nil, err } defer namespace.Close() @@ -179,13 +180,13 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...) if err != nil { - return err + return nil, err } defer cleanup() spec.Process.Terminal = meta.Tty if w.rootless { if err := rootlessspecconv.ToRootless(spec); err != nil { - return err + return nil, err } } @@ -193,7 +194,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M containerd.WithSpec(spec), ) if err != nil { - return err + return nil, err } defer func() { @@ -214,7 +215,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M Options: []string{"rbind"}, }})) if err != nil { - return err + return nil, err } defer func() { @@ -225,7 +226,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M if nn, ok := namespace.(OnCreateRuntimer); ok { if err := nn.OnCreateRuntime(task.Pid()); err != nil { - return err + return nil, err } } @@ -238,7 +239,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M } }) }) - return err + return nil, err } func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index a323bcc9cc..741f347cd9 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -6,6 +6,7 @@ import ( "net" "syscall" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" ) @@ -55,7 +56,7 @@ type Executor interface { // Run will start a container for the given process with rootfs, mounts. // `id` is an optional name for the container so it can be referenced later via Exec. // `started` is an optional channel that will be closed when the container setup completes and has started running. - Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) error + Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) (resourcestypes.Recorder, error) // Exec will start a process in container matching `id`. An error will be returned // if the container failed to start (via Run) or has exited before Exec is called. 
Exec(ctx context.Context, id string, process ProcessInfo) error diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go index 0d193555c9..0de29f8a8d 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/hosts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go @@ -20,9 +20,9 @@ func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.Ho return makeHostsFile(stateDir, extraHosts, idmap, hostname) } - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) { _, _, err := makeHostsFile(stateDir, nil, idmap, hostname) - return nil, err + return struct{}{}, err }) if err != nil { return "", nil, err diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go index 3ac0feda7a..9db0b3dfaa 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go +++ b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go @@ -11,12 +11,12 @@ import ( "github.com/pkg/errors" ) -var g flightcontrol.Group +var g flightcontrol.Group[struct{}] var notFirstRun bool var lastNotEmpty bool // overridden by tests -var resolvconfGet = resolvconf.Get +var resolvconfPath = resolvconf.Path type DNSConfig struct { Nameservers []string @@ -26,7 +26,7 @@ type DNSConfig struct { func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) { p := filepath.Join(stateDir, "resolv.conf") - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) { generate := !notFirstRun notFirstRun = true @@ -34,15 +34,15 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity fi, err := os.Stat(p) if err != nil { if !errors.Is(err, os.ErrNotExist) { - return "", err + return struct{}{}, err } generate = true } if !generate { - fiMain, err := os.Stat(resolvconf.Path()) + fiMain, err := os.Stat(resolvconfPath()) if err != nil { if !errors.Is(err, os.ErrNotExist) { - return nil, err + return struct{}{}, err } if lastNotEmpty { generate = true @@ -57,63 +57,59 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity } if !generate { - return "", nil + return struct{}{}, nil } - var dt []byte - f, err := resolvconfGet() - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return "", err - } - } else { - dt = f.Content + dt, err := os.ReadFile(resolvconfPath()) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return struct{}{}, err } + var f *resolvconf.File + tmpPath := p + ".tmp" if dns != nil { var ( - dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP) - dnsSearchDomains = resolvconf.GetSearchDomains(dt) - dnsOptions = resolvconf.GetOptions(dt) - ) - if len(dns.Nameservers) > 0 { - dnsNameservers = dns.Nameservers - } - if len(dns.SearchDomains) > 0 { + dnsNameservers = dns.Nameservers dnsSearchDomains = dns.SearchDomains + dnsOptions = dns.Options + ) + if len(dns.Nameservers) == 0 { + dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP) } - if len(dns.Options) > 0 { - dnsOptions = dns.Options + if len(dns.SearchDomains) == 0 { + dnsSearchDomains = resolvconf.GetSearchDomains(dt) + } + if len(dns.Options) == 0 { + dnsOptions = resolvconf.GetOptions(dt) } - f, err = resolvconf.Build(p+".tmp", dnsNameservers, 
dnsSearchDomains, dnsOptions) + f, err = resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions) if err != nil { - return "", err + return struct{}{}, err } dt = f.Content } f, err = resolvconf.FilterResolvDNS(dt, true) if err != nil { - return "", err + return struct{}{}, err } - tmpPath := p + ".tmp" if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil { - return "", err + return struct{}{}, err } if idmap != nil { root := idmap.RootPair() if err := os.Chown(tmpPath, root.UID, root.GID); err != nil { - return "", err + return struct{}{}, err } } if err := os.Rename(tmpPath, p); err != nil { - return "", err + return struct{}{}, err } - return "", nil + return struct{}{}, nil }) if err != nil { return "", err diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go index f825b1dce7..c6d665b081 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go @@ -37,6 +37,12 @@ const ( NoProcessSandbox ) +var tracingEnvVars = []string{ + "OTEL_TRACES_EXPORTER=otlp", + "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=" + getTracingSocket(), + "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc", +} + func (pm ProcessMode) String() string { switch pm { case ProcessSandbox: @@ -114,7 +120,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou if tracingSocket != "" { // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md - meta.Env = append(meta.Env, "OTEL_TRACES_EXPORTER=otlp", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=unix:///dev/otel-grpc.sock", "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc") + meta.Env = append(meta.Env, tracingEnvVars...) meta.Env = append(meta.Env, traceexec.Environ(ctx)...) 
} @@ -131,6 +137,12 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } + if cgroupNamespaceSupported() { + s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{ + Type: specs.CgroupNamespace, + }) + } + if len(meta.Ulimit) == 0 { // reset open files limit s.Process.Rlimits = nil @@ -185,12 +197,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou } if tracingSocket != "" { - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/dev/otel-grpc.sock", - Type: "bind", - Source: tracingSocket, - Options: []string{"ro", "rbind"}, - }) + s.Mounts = append(s.Mounts, getTracingSocketMount(tracingSocket)) } s.Mounts = dedupMounts(s.Mounts) diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go index f906f79b6b..97e95e9834 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go @@ -6,7 +6,9 @@ package oci import ( "context" "fmt" + "os" "strings" + "sync" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" @@ -21,6 +23,15 @@ import ( "github.com/pkg/errors" ) +var ( + cgroupNSOnce sync.Once + supportsCgroupNS bool +) + +const ( + tracingSocketPath = "/dev/otel-grpc.sock" +) + func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { return []oci.SpecOpts{ // https://github.com/moby/buildkit/issues/429 @@ -122,3 +133,25 @@ func withDefaultProfile() oci.SpecOpts { return err } } + +func getTracingSocketMount(socket string) specs.Mount { + return specs.Mount{ + Destination: tracingSocketPath, + Type: "bind", + Source: socket, + Options: []string{"ro", "rbind"}, + } +} + +func getTracingSocket() string { + return fmt.Sprintf("unix://%s", tracingSocketPath) +} + +func cgroupNamespaceSupported() bool { + cgroupNSOnce.Do(func() { + if _, err := os.Stat("/proc/self/ns/cgroup"); !os.IsNotExist(err) { + supportsCgroupNS = true + } + }) + return supportsCgroupNS +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go index 48b0969e39..83ee278187 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go @@ -4,12 +4,20 @@ package oci import ( + "fmt" + "path/filepath" + "github.com/containerd/containerd/oci" "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/solver/pb" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) +const ( + tracingSocketPath = "//./pipe/otel-grpc" +) + func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { return nil, nil } @@ -43,3 +51,19 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { } return nil, errors.New("no support for POSIXRlimit on Windows") } + +func getTracingSocketMount(socket string) specs.Mount { + return specs.Mount{ + Destination: filepath.FromSlash(tracingSocketPath), + Source: socket, + Options: []string{"ro"}, + } +} + +func getTracingSocket() string { + return fmt.Sprintf("npipe://%s", filepath.ToSlash(tracingSocketPath)) +} + +func cgroupNamespaceSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/cpu.go b/vendor/github.com/moby/buildkit/executor/resources/cpu.go new file mode 100644 index 0000000000..53d31f477f --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/executor/resources/cpu.go @@ -0,0 +1,141 @@ +package resources + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + cpuUsageUsec = "usage_usec" + cpuUserUsec = "user_usec" + cpuSystemUsec = "system_usec" + cpuNrPeriods = "nr_periods" + cpuNrThrottled = "nr_throttled" + cpuThrottledUsec = "throttled_usec" +) + +func getCgroupCPUStat(cgroupPath string) (*types.CPUStat, error) { + cpuStat := &types.CPUStat{} + + // Read cpu.stat file + cpuStatFile, err := os.Open(filepath.Join(cgroupPath, "cpu.stat")) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + defer cpuStatFile.Close() + + scanner := bufio.NewScanner(cpuStatFile) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + + if len(fields) < 2 { + continue + } + + key := fields[0] + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + + switch key { + case cpuUsageUsec: + cpuStat.UsageNanos = uint64Ptr(value * 1000) + case cpuUserUsec: + cpuStat.UserNanos = uint64Ptr(value * 1000) + case cpuSystemUsec: + cpuStat.SystemNanos = uint64Ptr(value * 1000) + case cpuNrPeriods: + cpuStat.NrPeriods = new(uint32) + *cpuStat.NrPeriods = uint32(value) + case cpuNrThrottled: + cpuStat.NrThrottled = new(uint32) + *cpuStat.NrThrottled = uint32(value) + case cpuThrottledUsec: + cpuStat.ThrottledNanos = uint64Ptr(value * 1000) + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // Read cpu.pressure file + pressure, err := parsePressureFile(filepath.Join(cgroupPath, "cpu.pressure")) + if err == nil { + cpuStat.Pressure = pressure + } + + return cpuStat, nil +} +func parsePressureFile(filename string) (*types.Pressure, error) { + content, err := os.ReadFile(filename) + if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTSUP) { // pressure file requires CONFIG_PSI + return nil, nil + } + return nil, err + } + + lines := strings.Split(string(content), "\n") + + pressure := &types.Pressure{} + for _, line := range lines { + // Skip empty lines + if len(strings.TrimSpace(line)) == 0 { + continue + } + + fields := strings.Fields(line) + prefix := fields[0] + pressureValues := &types.PressureValues{} + + for i := 1; i < len(fields); i++ { + keyValue := strings.Split(fields[i], "=") + key := keyValue[0] + valueStr := keyValue[1] + + if key == "total" { + totalValue, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return nil, err + } + pressureValues.Total = &totalValue + } else { + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + return nil, err + } + + switch key { + case "avg10": + pressureValues.Avg10 = &value + case "avg60": + pressureValues.Avg60 = &value + case "avg300": + pressureValues.Avg300 = &value + } + } + } + + switch prefix { + case "some": + pressure.Some = pressureValues + case "full": + pressure.Full = pressureValues + } + } + + return pressure, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/io.go b/vendor/github.com/moby/buildkit/executor/resources/io.go new file mode 100644 index 0000000000..be56d76375 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/io.go @@ -0,0 +1,117 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( 
+ ioStatFile = "io.stat" + ioPressureFile = "io.pressure" +) + +const ( + ioReadBytes = "rbytes" + ioWriteBytes = "wbytes" + ioDiscardBytes = "dbytes" + ioReadIOs = "rios" + ioWriteIOs = "wios" + ioDiscardIOs = "dios" +) + +func getCgroupIOStat(cgroupPath string) (*types.IOStat, error) { + ioStatPath := filepath.Join(cgroupPath, ioStatFile) + data, err := os.ReadFile(ioStatPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to read %s", ioStatPath) + } + + ioStat := &types.IOStat{} + lines := strings.Split(string(data), "\n") + for _, line := range lines { + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + + for _, part := range parts[1:] { + key, value := parseKeyValue(part) + if key == "" { + continue + } + + switch key { + case ioReadBytes: + if ioStat.ReadBytes != nil { + *ioStat.ReadBytes += value + } else { + ioStat.ReadBytes = uint64Ptr(value) + } + case ioWriteBytes: + if ioStat.WriteBytes != nil { + *ioStat.WriteBytes += value + } else { + ioStat.WriteBytes = uint64Ptr(value) + } + case ioDiscardBytes: + if ioStat.DiscardBytes != nil { + *ioStat.DiscardBytes += value + } else { + ioStat.DiscardBytes = uint64Ptr(value) + } + case ioReadIOs: + if ioStat.ReadIOs != nil { + *ioStat.ReadIOs += value + } else { + ioStat.ReadIOs = uint64Ptr(value) + } + case ioWriteIOs: + if ioStat.WriteIOs != nil { + *ioStat.WriteIOs += value + } else { + ioStat.WriteIOs = uint64Ptr(value) + } + case ioDiscardIOs: + if ioStat.DiscardIOs != nil { + *ioStat.DiscardIOs += value + } else { + ioStat.DiscardIOs = uint64Ptr(value) + } + } + } + } + + // Parse the pressure + pressure, err := parsePressureFile(filepath.Join(cgroupPath, ioPressureFile)) + if err != nil { + return nil, err + } + ioStat.Pressure = pressure + + return ioStat, nil +} + +func parseKeyValue(kv string) (key string, value uint64) { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return "", 0 + } + key = parts[0] + value, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return "", 0 + } + return key, value +} + +func uint64Ptr(v uint64) *uint64 { + return &v +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/memory.go b/vendor/github.com/moby/buildkit/executor/resources/memory.go new file mode 100644 index 0000000000..775f0f8dae --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/memory.go @@ -0,0 +1,159 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + memoryStatFile = "memory.stat" + memoryPressureFile = "memory.pressure" + memoryPeakFile = "memory.peak" + memorySwapCurrentFile = "memory.swap.current" + memoryEventsFile = "memory.events" +) + +const ( + memoryAnon = "anon" + memoryFile = "file" + memoryKernelStack = "kernel_stack" + memoryPageTables = "pagetables" + memorySock = "sock" + memoryShmem = "shmem" + memoryFileMapped = "file_mapped" + memoryFileDirty = "file_dirty" + memoryFileWriteback = "file_writeback" + memorySlab = "slab" + memoryPgscan = "pgscan" + memoryPgsteal = "pgsteal" + memoryPgfault = "pgfault" + memoryPgmajfault = "pgmajfault" + + memoryLow = "low" + memoryHigh = "high" + memoryMax = "max" + memoryOom = "oom" + memoryOomKill = "oom_kill" +) + +func getCgroupMemoryStat(path string) (*types.MemoryStat, error) { + memoryStat := &types.MemoryStat{} + + // Parse memory.stat + err := parseKeyValueFile(filepath.Join(path, 
memoryStatFile), func(key string, value uint64) { + switch key { + case memoryAnon: + memoryStat.Anon = &value + case memoryFile: + memoryStat.File = &value + case memoryKernelStack: + memoryStat.KernelStack = &value + case memoryPageTables: + memoryStat.PageTables = &value + case memorySock: + memoryStat.Sock = &value + case memoryShmem: + memoryStat.Shmem = &value + case memoryFileMapped: + memoryStat.FileMapped = &value + case memoryFileDirty: + memoryStat.FileDirty = &value + case memoryFileWriteback: + memoryStat.FileWriteback = &value + case memorySlab: + memoryStat.Slab = &value + case memoryPgscan: + memoryStat.Pgscan = &value + case memoryPgsteal: + memoryStat.Pgsteal = &value + case memoryPgfault: + memoryStat.Pgfault = &value + case memoryPgmajfault: + memoryStat.Pgmajfault = &value + } + }) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + + pressure, err := parsePressureFile(filepath.Join(path, memoryPressureFile)) + if err != nil { + return nil, err + } + if pressure != nil { + memoryStat.Pressure = pressure + } + + err = parseKeyValueFile(filepath.Join(path, memoryEventsFile), func(key string, value uint64) { + switch key { + case memoryLow: + memoryStat.LowEvents = value + case memoryHigh: + memoryStat.HighEvents = value + case memoryMax: + memoryStat.MaxEvents = value + case memoryOom: + memoryStat.OomEvents = value + case memoryOomKill: + memoryStat.OomKillEvents = value + } + }) + + if err != nil { + return nil, err + } + + peak, err := parseSingleValueFile(filepath.Join(path, memoryPeakFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } else { + memoryStat.Peak = &peak + } + + swap, err := parseSingleValueFile(filepath.Join(path, memorySwapCurrentFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } else { + memoryStat.SwapBytes = &swap + } + + return memoryStat, nil +} + +func parseKeyValueFile(filePath string, callback func(key string, value uint64)) error { + content, err := os.ReadFile(filePath) + if err != nil { + return errors.Wrapf(err, "failed to read %s", filePath) + } + + lines := strings.Split(string(content), "\n") + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 { + continue + } + + fields := strings.Fields(line) + key := fields[0] + valueStr := fields[1] + value, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return errors.Wrapf(err, "failed to parse value for %s", key) + } + + callback(key, value) + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor.go b/vendor/github.com/moby/buildkit/executor/resources/monitor.go new file mode 100644 index 0000000000..95b954bcbe --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor.go @@ -0,0 +1,287 @@ +package resources + +import ( + "bufio" + "context" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/util/network" + "github.com/prometheus/procfs" + "github.com/sirupsen/logrus" +) + +const ( + cgroupProcsFile = "cgroup.procs" + cgroupControllersFile = "cgroup.controllers" + cgroupSubtreeFile = "cgroup.subtree_control" + defaultMountpoint = "/sys/fs/cgroup" + initGroup = "init" +) + +var initOnce sync.Once +var isCgroupV2 bool + +type cgroupRecord struct { + once sync.Once + ns string + sampler *Sub[*types.Sample] + closeSampler func() error + samples []*types.Sample + err error + done chan 
struct{} + monitor *Monitor + netSampler NetworkSampler + startCPUStat *procfs.CPUStat + sysCPUStat *types.SysCPUStat +} + +func (r *cgroupRecord) Wait() error { + go r.close() + <-r.done + return r.err +} + +func (r *cgroupRecord) Start() { + if stat, err := r.monitor.proc.Stat(); err == nil { + r.startCPUStat = &stat.CPUTotal + } + s := NewSampler(2*time.Second, 10, r.sample) + r.sampler = s.Record() + r.closeSampler = s.Close +} + +func (r *cgroupRecord) Close() { + r.close() +} + +func (r *cgroupRecord) CloseAsync(next func(context.Context) error) error { + go func() { + r.close() + next(context.TODO()) + }() + return nil +} + +func (r *cgroupRecord) close() { + r.once.Do(func() { + defer close(r.done) + go func() { + r.monitor.mu.Lock() + delete(r.monitor.records, r.ns) + r.monitor.mu.Unlock() + }() + if r.sampler == nil { + return + } + s, err := r.sampler.Close(true) + if err != nil { + r.err = err + } else { + r.samples = s + } + r.closeSampler() + + if r.startCPUStat != nil { + stat, err := r.monitor.proc.Stat() + if err == nil { + cpu := &types.SysCPUStat{ + User: stat.CPUTotal.User - r.startCPUStat.User, + Nice: stat.CPUTotal.Nice - r.startCPUStat.Nice, + System: stat.CPUTotal.System - r.startCPUStat.System, + Idle: stat.CPUTotal.Idle - r.startCPUStat.Idle, + Iowait: stat.CPUTotal.Iowait - r.startCPUStat.Iowait, + IRQ: stat.CPUTotal.IRQ - r.startCPUStat.IRQ, + SoftIRQ: stat.CPUTotal.SoftIRQ - r.startCPUStat.SoftIRQ, + Steal: stat.CPUTotal.Steal - r.startCPUStat.Steal, + Guest: stat.CPUTotal.Guest - r.startCPUStat.Guest, + GuestNice: stat.CPUTotal.GuestNice - r.startCPUStat.GuestNice, + } + r.sysCPUStat = cpu + } + } + }) +} + +func (r *cgroupRecord) sample(tm time.Time) (*types.Sample, error) { + cpu, err := getCgroupCPUStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + memory, err := getCgroupMemoryStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + io, err := getCgroupIOStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + pids, err := getCgroupPIDsStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + sample := &types.Sample{ + Timestamp_: tm, + CPUStat: cpu, + MemoryStat: memory, + IOStat: io, + PIDsStat: pids, + } + if r.netSampler != nil { + net, err := r.netSampler.Sample() + if err != nil { + return nil, err + } + sample.NetStat = net + } + return sample, nil +} + +func (r *cgroupRecord) Samples() (*types.Samples, error) { + <-r.done + if r.err != nil { + return nil, r.err + } + return &types.Samples{ + Samples: r.samples, + SysCPUStat: r.sysCPUStat, + }, nil +} + +type nopRecord struct { +} + +func (r *nopRecord) Wait() error { + return nil +} + +func (r *nopRecord) Samples() (*types.Samples, error) { + return nil, nil +} + +func (r *nopRecord) Close() { +} + +func (r *nopRecord) CloseAsync(next func(context.Context) error) error { + return next(context.TODO()) +} + +func (r *nopRecord) Start() { +} + +type Monitor struct { + mu sync.Mutex + closed chan struct{} + records map[string]*cgroupRecord + proc procfs.FS +} + +type NetworkSampler interface { + Sample() (*network.Sample, error) +} + +type RecordOpt struct { + NetworkSampler NetworkSampler +} + +func (m *Monitor) RecordNamespace(ns string, opt RecordOpt) (types.Recorder, error) { + isClosed := false + select { + case <-m.closed: + isClosed = true + default: + } + if !isCgroupV2 || isClosed { + return &nopRecord{}, nil + } + r := &cgroupRecord{ + ns: ns, + done: make(chan 
struct{}), + monitor: m, + netSampler: opt.NetworkSampler, + } + m.mu.Lock() + m.records[ns] = r + m.mu.Unlock() + return r, nil +} + +func (m *Monitor) Close() error { + close(m.closed) + m.mu.Lock() + defer m.mu.Unlock() + + for _, r := range m.records { + r.close() + } + return nil +} + +func NewMonitor() (*Monitor, error) { + initOnce.Do(func() { + isCgroupV2 = isCgroup2() + if !isCgroupV2 { + return + } + if err := prepareCgroupControllers(); err != nil { + logrus.Warnf("failed to prepare cgroup controllers: %+v", err) + } + }) + + fs, err := procfs.NewDefaultFS() + if err != nil { + return nil, err + } + + return &Monitor{ + closed: make(chan struct{}), + records: make(map[string]*cgroupRecord), + proc: fs, + }, nil +} + +func prepareCgroupControllers() error { + v, ok := os.LookupEnv("BUILDKIT_SETUP_CGROUPV2_ROOT") + if !ok { + return nil + } + if b, _ := strconv.ParseBool(v); !b { + return nil + } + // move current process to init cgroup + if err := os.MkdirAll(filepath.Join(defaultMountpoint, initGroup), 0755); err != nil { + return err + } + f, err := os.OpenFile(filepath.Join(defaultMountpoint, cgroupProcsFile), os.O_RDONLY, 0) + if err != nil { + return err + } + s := bufio.NewScanner(f) + for s.Scan() { + if err := os.WriteFile(filepath.Join(defaultMountpoint, initGroup, cgroupProcsFile), s.Bytes(), 0); err != nil { + return err + } + } + if err := f.Close(); err != nil { + return err + } + dt, err := os.ReadFile(filepath.Join(defaultMountpoint, cgroupControllersFile)) + if err != nil { + return err + } + for _, c := range strings.Split(string(dt), " ") { + if c == "" { + continue + } + if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil { + // ignore error + logrus.Warnf("failed to enable cgroup controller %q: %+v", c, err) + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go b/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go new file mode 100644 index 0000000000..aefc2adce7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go @@ -0,0 +1,15 @@ +//go:build linux +// +build linux + +package resources + +import "golang.org/x/sys/unix" + +func isCgroup2() bool { + var st unix.Statfs_t + err := unix.Statfs(defaultMountpoint, &st) + if err != nil { + return false + } + return st.Type == unix.CGROUP2_SUPER_MAGIC +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go b/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go new file mode 100644 index 0000000000..20a50a648c --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package resources + +func isCgroup2() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/pids.go b/vendor/github.com/moby/buildkit/executor/resources/pids.go new file mode 100644 index 0000000000..88493d805e --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/pids.go @@ -0,0 +1,45 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + pidsCurrentFile = "pids.current" +) + +func getCgroupPIDsStat(path string) (*types.PIDsStat, error) { + pidsStat := &types.PIDsStat{} + + v, err := parseSingleValueFile(filepath.Join(path, pidsCurrentFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) 
{ + return nil, err + } + } else { + pidsStat.Current = &v + } + + return pidsStat, nil +} + +func parseSingleValueFile(filePath string) (uint64, error) { + content, err := os.ReadFile(filePath) + if err != nil { + return 0, errors.Wrapf(err, "failed to read %s", filePath) + } + + valueStr := strings.TrimSpace(string(content)) + value, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse value: %s", valueStr) + } + + return value, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sampler.go b/vendor/github.com/moby/buildkit/executor/resources/sampler.go new file mode 100644 index 0000000000..38e94812da --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sampler.go @@ -0,0 +1,139 @@ +package resources + +import ( + "sync" + "time" +) + +type WithTimestamp interface { + Timestamp() time.Time +} + +type Sampler[T WithTimestamp] struct { + mu sync.Mutex + minInterval time.Duration + maxSamples int + callback func(ts time.Time) (T, error) + doneOnce sync.Once + done chan struct{} + running bool + subs map[*Sub[T]]struct{} +} + +type Sub[T WithTimestamp] struct { + sampler *Sampler[T] + interval time.Duration + first time.Time + last time.Time + samples []T + err error +} + +func (s *Sub[T]) Close(captureLast bool) ([]T, error) { + s.sampler.mu.Lock() + delete(s.sampler.subs, s) + + if s.err != nil { + s.sampler.mu.Unlock() + return nil, s.err + } + current := s.first + out := make([]T, 0, len(s.samples)+1) + for i, v := range s.samples { + ts := v.Timestamp() + if i == 0 || ts.Sub(current) >= s.interval { + out = append(out, v) + current = ts + } + } + s.sampler.mu.Unlock() + + if captureLast { + v, err := s.sampler.callback(time.Now()) + if err != nil { + return nil, err + } + out = append(out, v) + } + + return out, nil +} + +func NewSampler[T WithTimestamp](minInterval time.Duration, maxSamples int, cb func(time.Time) (T, error)) *Sampler[T] { + s := &Sampler[T]{ + minInterval: minInterval, + maxSamples: maxSamples, + callback: cb, + done: make(chan struct{}), + subs: make(map[*Sub[T]]struct{}), + } + return s +} + +func (s *Sampler[T]) Record() *Sub[T] { + ss := &Sub[T]{ + interval: s.minInterval, + first: time.Now(), + sampler: s, + } + s.mu.Lock() + s.subs[ss] = struct{}{} + if !s.running { + s.running = true + go s.run() + } + s.mu.Unlock() + return ss +} + +func (s *Sampler[T]) run() { + ticker := time.NewTimer(s.minInterval) + for { + select { + case <-s.done: + ticker.Stop() + return + case <-ticker.C: + tm := time.Now() + s.mu.Lock() + active := make([]*Sub[T], 0, len(s.subs)) + for ss := range s.subs { + if tm.Sub(ss.last) < ss.interval { + continue + } + ss.last = tm + active = append(active, ss) + } + s.mu.Unlock() + ticker = time.NewTimer(s.minInterval) + if len(active) == 0 { + continue + } + value, err := s.callback(tm) + s.mu.Lock() + for _, ss := range active { + if _, found := s.subs[ss]; !found { + continue // skip if Close() was called while the lock was released + } + if err != nil { + ss.err = err + } else { + ss.samples = append(ss.samples, value) + ss.err = nil + } + dur := ss.last.Sub(ss.first) + if time.Duration(ss.interval)*time.Duration(s.maxSamples) <= dur { + ss.interval *= 2 + } + } + s.mu.Unlock() + } + } +} + +func (s *Sampler[T]) Close() error { + s.doneOnce.Do(func() { + close(s.done) + }) + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys.go b/vendor/github.com/moby/buildkit/executor/resources/sys.go new file mode 100644 index 
0000000000..7082517adc --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys.go @@ -0,0 +1,9 @@ +package resources + +import "github.com/moby/buildkit/executor/resources/types" + +type SysSampler = Sub[*types.SysSample] + +func NewSysSampler() (*Sampler[*types.SysSample], error) { + return newSysSampler() +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go b/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go new file mode 100644 index 0000000000..d7835137ba --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go @@ -0,0 +1,93 @@ +package resources + +import ( + "os" + "time" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/prometheus/procfs" +) + +func newSysSampler() (*Sampler[*types.SysSample], error) { + pfs, err := procfs.NewDefaultFS() + if err != nil { + return nil, err + } + + return NewSampler(2*time.Second, 20, func(tm time.Time) (*types.SysSample, error) { + return sampleSys(pfs, tm) + }), nil +} + +func sampleSys(proc procfs.FS, tm time.Time) (*types.SysSample, error) { + stat, err := proc.Stat() + if err != nil { + return nil, err + } + + s := &types.SysSample{ + Timestamp_: tm, + } + + s.CPUStat = &types.SysCPUStat{ + User: stat.CPUTotal.User, + Nice: stat.CPUTotal.Nice, + System: stat.CPUTotal.System, + Idle: stat.CPUTotal.Idle, + Iowait: stat.CPUTotal.Iowait, + IRQ: stat.CPUTotal.IRQ, + SoftIRQ: stat.CPUTotal.SoftIRQ, + Steal: stat.CPUTotal.Steal, + Guest: stat.CPUTotal.Guest, + GuestNice: stat.CPUTotal.GuestNice, + } + + s.ProcStat = &types.ProcStat{ + ContextSwitches: stat.ContextSwitches, + ProcessCreated: stat.ProcessCreated, + ProcessesRunning: stat.ProcessesRunning, + } + + mem, err := proc.Meminfo() + if err != nil { + return nil, err + } + + s.MemoryStat = &types.SysMemoryStat{ + Total: mem.MemTotal, + Free: mem.MemFree, + Buffers: mem.Buffers, + Cached: mem.Cached, + Active: mem.Active, + Inactive: mem.Inactive, + Swap: mem.SwapTotal, + Available: mem.MemAvailable, + Dirty: mem.Dirty, + Writeback: mem.Writeback, + Slab: mem.Slab, + } + + if _, err := os.Lstat("/proc/pressure"); err != nil { + return s, nil + } + + cp, err := parsePressureFile("/proc/pressure/cpu") + if err != nil { + return nil, err + } + s.CPUPressure = cp + + mp, err := parsePressureFile("/proc/pressure/memory") + if err != nil { + return nil, err + } + s.MemoryPressure = mp + + ip, err := parsePressureFile("/proc/pressure/io") + if err != nil { + return nil, err + } + s.IOPressure = ip + + return s, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go b/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go new file mode 100644 index 0000000000..dd0da8582e --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go @@ -0,0 +1,9 @@ +//go:build !linux + +package resources + +import "github.com/moby/buildkit/executor/resources/types" + +func newSysSampler() (*Sampler[*types.SysSample], error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go b/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go new file mode 100644 index 0000000000..56db46945b --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go @@ -0,0 +1,72 @@ +package types + +import ( + "encoding/json" + "math" + "time" +) + +type SysCPUStat struct { + User float64 `json:"user"` + Nice float64 `json:"nice"` + System float64 `json:"system"` + Idle float64 
`json:"idle"` + Iowait float64 `json:"iowait"` + IRQ float64 `json:"irq"` + SoftIRQ float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` +} + +type sysCPUStatAlias SysCPUStat // avoid recursion of MarshalJSON + +func (s SysCPUStat) MarshalJSON() ([]byte, error) { + return json.Marshal(sysCPUStatAlias{ + User: math.Round(s.User*1000) / 1000, + Nice: math.Round(s.Nice*1000) / 1000, + System: math.Round(s.System*1000) / 1000, + Idle: math.Round(s.Idle*1000) / 1000, + Iowait: math.Round(s.Iowait*1000) / 1000, + IRQ: math.Round(s.IRQ*1000) / 1000, + SoftIRQ: math.Round(s.SoftIRQ*1000) / 1000, + Steal: math.Round(s.Steal*1000) / 1000, + Guest: math.Round(s.Guest*1000) / 1000, + GuestNice: math.Round(s.GuestNice*1000) / 1000, + }) +} + +type ProcStat struct { + ContextSwitches uint64 `json:"contextSwitches"` + ProcessCreated uint64 `json:"processCreated"` + ProcessesRunning uint64 `json:"processesRunning"` +} + +type SysMemoryStat struct { + Total *uint64 `json:"total"` + Free *uint64 `json:"free"` + Available *uint64 `json:"available"` + Buffers *uint64 `json:"buffers"` + Cached *uint64 `json:"cached"` + Active *uint64 `json:"active"` + Inactive *uint64 `json:"inactive"` + Swap *uint64 `json:"swap"` + Dirty *uint64 `json:"dirty"` + Writeback *uint64 `json:"writeback"` + Slab *uint64 `json:"slab"` +} + +type SysSample struct { + //nolint + Timestamp_ time.Time `json:"timestamp"` + CPUStat *SysCPUStat `json:"cpuStat,omitempty"` + ProcStat *ProcStat `json:"procStat,omitempty"` + MemoryStat *SysMemoryStat `json:"memoryStat,omitempty"` + CPUPressure *Pressure `json:"cpuPressure,omitempty"` + MemoryPressure *Pressure `json:"memoryPressure,omitempty"` + IOPressure *Pressure `json:"ioPressure,omitempty"` +} + +func (s *SysSample) Timestamp() time.Time { + return s.Timestamp_ +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/types/types.go b/vendor/github.com/moby/buildkit/executor/resources/types/types.go new file mode 100644 index 0000000000..9bac557e21 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/types/types.go @@ -0,0 +1,104 @@ +package types + +import ( + "context" + "time" + + "github.com/moby/buildkit/util/network" +) + +type Recorder interface { + Start() + Close() + CloseAsync(func(context.Context) error) error + Wait() error + Samples() (*Samples, error) +} + +type Samples struct { + Samples []*Sample `json:"samples,omitempty"` + SysCPUStat *SysCPUStat `json:"sysCPUStat,omitempty"` +} + +// Sample represents a wrapper for sampled data of cgroupv2 controllers +type Sample struct { + //nolint + Timestamp_ time.Time `json:"timestamp"` + CPUStat *CPUStat `json:"cpuStat,omitempty"` + MemoryStat *MemoryStat `json:"memoryStat,omitempty"` + IOStat *IOStat `json:"ioStat,omitempty"` + PIDsStat *PIDsStat `json:"pidsStat,omitempty"` + NetStat *network.Sample `json:"netStat,omitempty"` +} + +func (s *Sample) Timestamp() time.Time { + return s.Timestamp_ +} + +// CPUStat represents the sampling state of the cgroupv2 CPU controller +type CPUStat struct { + UsageNanos *uint64 `json:"usageNanos,omitempty"` + UserNanos *uint64 `json:"userNanos,omitempty"` + SystemNanos *uint64 `json:"systemNanos,omitempty"` + NrPeriods *uint32 `json:"nrPeriods,omitempty"` + NrThrottled *uint32 `json:"nrThrottled,omitempty"` + ThrottledNanos *uint64 `json:"throttledNanos,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// MemoryStat represents the sampling state of the cgroupv2 memory controller 
+type MemoryStat struct { + SwapBytes *uint64 `json:"swapBytes,omitempty"` + Anon *uint64 `json:"anon,omitempty"` + File *uint64 `json:"file,omitempty"` + Kernel *uint64 `json:"kernel,omitempty"` + KernelStack *uint64 `json:"kernelStack,omitempty"` + PageTables *uint64 `json:"pageTables,omitempty"` + Sock *uint64 `json:"sock,omitempty"` + Vmalloc *uint64 `json:"vmalloc,omitempty"` + Shmem *uint64 `json:"shmem,omitempty"` + FileMapped *uint64 `json:"fileMapped,omitempty"` + FileDirty *uint64 `json:"fileDirty,omitempty"` + FileWriteback *uint64 `json:"fileWriteback,omitempty"` + Slab *uint64 `json:"slab,omitempty"` + Pgscan *uint64 `json:"pgscan,omitempty"` + Pgsteal *uint64 `json:"pgsteal,omitempty"` + Pgfault *uint64 `json:"pgfault,omitempty"` + Pgmajfault *uint64 `json:"pgmajfault,omitempty"` + Peak *uint64 `json:"peak,omitempty"` + LowEvents uint64 `json:"lowEvents,omitempty"` + HighEvents uint64 `json:"highEvents,omitempty"` + MaxEvents uint64 `json:"maxEvents,omitempty"` + OomEvents uint64 `json:"oomEvents,omitempty"` + OomKillEvents uint64 `json:"oomKillEvents,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// IOStat represents the sampling state of the cgroupv2 IO controller +type IOStat struct { + ReadBytes *uint64 `json:"readBytes,omitempty"` + WriteBytes *uint64 `json:"writeBytes,omitempty"` + DiscardBytes *uint64 `json:"discardBytes,omitempty"` + ReadIOs *uint64 `json:"readIOs,omitempty"` + WriteIOs *uint64 `json:"writeIOs,omitempty"` + DiscardIOs *uint64 `json:"discardIOs,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// PIDsStat represents the sampling state of the cgroupv2 PIDs controller +type PIDsStat struct { + Current *uint64 `json:"current,omitempty"` +} + +// Pressure represents the sampling state of pressure files +type Pressure struct { + Some *PressureValues `json:"some"` + Full *PressureValues `json:"full"` +} + +type PressureValues struct { + Avg10 *float64 `json:"avg10"` + Avg60 *float64 `json:"avg60"` + Avg300 *float64 `json:"avg300"` + Total *uint64 `json:"total"` +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index 213ebb7366..e804ee850b 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "sync" "syscall" "time" @@ -22,6 +23,8 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/executor/resources" + resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" @@ -50,6 +53,7 @@ type Opt struct { ApparmorProfile string SELinux bool TracingSocket string + ResourceMonitor *resources.Monitor } var defaultCommandCandidates = []string{"buildkit-runc", "runc"} @@ -70,6 +74,7 @@ type runcExecutor struct { apparmorProfile string selinux bool tracingSocket string + resmon *resources.Monitor } func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { @@ -92,7 +97,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex root := opt.Root - if err := os.MkdirAll(root, 0711); err != nil { + if err := os.MkdirAll(root, 0o711); err != nil { return nil, errors.Wrapf(err, 
"failed to create %s", root) } @@ -135,11 +140,12 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex apparmorProfile: opt.ApparmorProfile, selinux: opt.SELinux, tracingSocket: opt.TracingSocket, + resmon: opt.ResourceMonitor, } return w, nil } -func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { meta := process.Meta startedOnce := sync.Once{} @@ -162,13 +168,18 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, provider, ok := w.networkProviders[meta.NetMode] if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) + return nil, errors.Errorf("unknown network mode %s", meta.NetMode) } namespace, err := provider.New(ctx, meta.Hostname) if err != nil { - return err + return nil, err } - defer namespace.Close() + doReleaseNetwork := true + defer func() { + if doReleaseNetwork { + namespace.Close() + } + }() if meta.NetMode == pb.NetMode_HOST { bklog.G(ctx).Info("enabling HostNetworking") @@ -176,12 +187,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns) if err != nil { - return err + return nil, err } hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap, meta.Hostname) if err != nil { - return err + return nil, err } if clean != nil { defer clean() @@ -189,12 +200,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mountable, err := root.Src.Mount(ctx, false) if err != nil { - return err + return nil, err } rootMount, release, err := mountable.Mount() if err != nil { - return err + return nil, err } if release != nil { defer release() @@ -205,8 +216,8 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } bundle := filepath.Join(w.root, id) - if err := os.Mkdir(bundle, 0711); err != nil { - return err + if err := os.Mkdir(bundle, 0o711); err != nil { + return nil, err } defer os.RemoveAll(bundle) @@ -216,24 +227,24 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } rootFSPath := filepath.Join(bundle, "rootfs") - if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil { - return err + if err := idtools.MkdirAllAndChown(rootFSPath, 0o700, identity); err != nil { + return nil, err } if err := mount.All(rootMount, rootFSPath); err != nil { - return err + return nil, err } defer mount.Unmount(rootFSPath, 0) - defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)() + defer executor.MountStubsCleaner(ctx, rootFSPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User) if err != nil { - return err + return nil, err } f, err := os.Create(filepath.Join(bundle, "config.json")) if err != nil { - return err + return nil, err } defer f.Close() @@ -250,13 +261,13 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, if w.idmap != nil { identity, err = w.idmap.ToHost(identity) if err != nil { - return err + return nil, err } } spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, 
w.apparmorProfile, w.selinux, w.tracingSocket, opts...) if err != nil { - return err + return nil, err } defer cleanup() @@ -267,11 +278,11 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, newp, err := fs.RootPath(rootFSPath, meta.Cwd) if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) + return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp) } if _, err := os.Stat(newp); err != nil { - if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { - return errors.Wrapf(err, "failed to create working directory %s", newp) + if err := idtools.MkdirAllAndChown(newp, 0o755, identity); err != nil { + return nil, errors.Wrapf(err, "failed to create working directory %s", newp) } } @@ -279,59 +290,63 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, spec.Process.OOMScoreAdj = w.oomScoreAdj if w.rootless { if err := rootlessspecconv.ToRootless(spec); err != nil { - return err + return nil, err } } if err := json.NewEncoder(f).Encode(spec); err != nil { - return err + return nil, err } - // runCtx/killCtx is used for extra check in case the kill command blocks - runCtx, cancelRun := context.WithCancel(context.Background()) - defer cancelRun() - - ended := make(chan struct{}) - go func() { - for { - select { - case <-ctx.Done(): - killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) - if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { - bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err) - select { - case <-killCtx.Done(): - timeout() - cancelRun() - return - default: - } - } - timeout() - select { - case <-time.After(50 * time.Millisecond): - case <-ended: - return - } - case <-ended: - return - } - } - }() - bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args) + cgroupPath := spec.Linux.CgroupsPath + if cgroupPath != "" { + rec, err = w.resmon.RecordNamespace(cgroupPath, resources.RecordOpt{ + NetworkSampler: namespace, + }) + if err != nil { + return nil, err + } + } + trace.SpanFromContext(ctx).AddEvent("Container created") - err = w.run(runCtx, id, bundle, process, func() { + err = w.run(ctx, id, bundle, process, func() { startedOnce.Do(func() { trace.SpanFromContext(ctx).AddEvent("Container started") if started != nil { close(started) } + if rec != nil { + rec.Start() + } }) - }) - close(ended) - return exitError(ctx, err) + }, true) + + releaseContainer := func(ctx context.Context) error { + err := w.runc.Delete(ctx, id, &runc.DeleteOpts{}) + err1 := namespace.Close() + if err == nil { + err = err1 + } + return err + } + doReleaseNetwork = false + + err = exitError(ctx, err) + if err != nil { + if rec != nil { + rec.Close() + } + releaseContainer(context.TODO()) + return nil, err + } + + if rec == nil { + return nil, releaseContainer(context.TODO()) + } + + return rec, rec.CloseAsync(releaseContainer) } func exitError(ctx context.Context, err error) error { @@ -341,7 +356,7 @@ func exitError(ctx context.Context, err error) error { Err: err, } var runcExitError *runc.ExitError - if errors.As(err, &runcExitError) { + if errors.As(err, &runcExitError) && runcExitError.Status >= 0 { exitErr = &gatewayapi.ExitError{ ExitCode: uint32(runcExitError.Status), } @@ -462,23 +477,190 @@ func (s *forwardIO) Stderr() io.ReadCloser { return nil } -// startingProcess is to track the os process so we can send signals to it. 
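// A minimal sketch of the resource-Recorder lifecycle that Run wires up above,
// assuming a *resources.Monitor and caller-provided run/release callbacks
// (standing in for `runc run --keep` and releaseContainer). Whether Wait must
// be called before Samples is an assumption about the Recorder contract.
package main

import (
	"context"

	"github.com/moby/buildkit/executor/resources"
	resourcestypes "github.com/moby/buildkit/executor/resources/types"
)

func recordRun(mon *resources.Monitor, cgroupPath string, opt resources.RecordOpt,
	run func() error, release func(context.Context) error) (*resourcestypes.Samples, error) {
	rec, err := mon.RecordNamespace(cgroupPath, opt)
	if err != nil {
		return nil, err
	}
	rec.Start() // Run above starts recording from the `started` callback instead
	if err := run(); err != nil {
		rec.Close()
		_ = release(context.TODO())
		return nil, err
	}
	// Hand cleanup to the recorder so a final cgroup sample can still be taken
	// before the kept container is deleted.
	if err := rec.CloseAsync(release); err != nil {
		return nil, err
	}
	if err := rec.Wait(); err != nil {
		return nil, err
	}
	return rec.Samples()
}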
-type startingProcess struct { - Process *os.Process - ready chan struct{} +// newRuncProcKiller returns an abstraction for sending SIGKILL to the +// process inside the container initiated from `runc run`. +func newRunProcKiller(runC *runc.Runc, id string) procKiller { + return procKiller{runC: runC, id: id} } -// Release will free resources with a startingProcess. -func (p *startingProcess) Release() { - if p.Process != nil { - p.Process.Release() +// newExecProcKiller returns an abstraction for sending SIGKILL to the +// process inside the container initiated from `runc exec`. +func newExecProcKiller(runC *runc.Runc, id string) (procKiller, error) { + // for `runc exec` we need to create a pidfile and read it later to kill + // the process + tdir, err := os.MkdirTemp("", "runc") + if err != nil { + return procKiller{}, errors.Wrap(err, "failed to create directory for runc pidfile") + } + + return procKiller{ + runC: runC, + id: id, + pidfile: filepath.Join(tdir, "pidfile"), + cleanup: func() { + os.RemoveAll(tdir) + }, + }, nil +} + +type procKiller struct { + runC *runc.Runc + id string + pidfile string + cleanup func() +} + +// Cleanup will delete any tmp files created for the pidfile allocation +// if this killer was for a `runc exec` process. +func (k procKiller) Cleanup() { + if k.cleanup != nil { + k.cleanup() } } -// WaitForReady will wait until the Process has been populated or the -// provided context was cancelled. This should be called before using -// the Process field. -func (p *startingProcess) WaitForReady(ctx context.Context) error { +// Kill will send SIGKILL to the process running inside the container. +// If the process was created by `runc run` then we will use `runc kill`, +// otherwise for `runc exec` we will read the pid from a pidfile and then +// send the signal directly that process. +func (k procKiller) Kill(ctx context.Context) (err error) { + bklog.G(ctx).Debugf("sending sigkill to process in container %s", k.id) + defer func() { + if err != nil { + bklog.G(ctx).Errorf("failed to kill process in container id %s: %+v", k.id, err) + } + }() + + // this timeout is generally a no-op, the Kill ctx should already have a + // shorter timeout but here as a fail-safe for future refactoring. + ctx, timeout := context.WithTimeout(ctx, 10*time.Second) + defer timeout() + + if k.pidfile == "" { + // for `runc run` process we use `runc kill` to terminate the process + return k.runC.Kill(ctx, k.id, int(syscall.SIGKILL), nil) + } + + // `runc exec` will write the pidfile a few milliseconds after we + // get the runc pid via the startedCh, so we might need to retry until + // it appears in the edge case where we want to kill a process + // immediately after it was created. 
+ var pidData []byte + for { + pidData, err = os.ReadFile(k.pidfile) + if err != nil { + if os.IsNotExist(err) { + select { + case <-ctx.Done(): + return errors.New("context cancelled before runc wrote pidfile") + case <-time.After(10 * time.Millisecond): + continue + } + } + return errors.Wrap(err, "failed to read pidfile from runc") + } + break + } + pid, err := strconv.Atoi(string(pidData)) + if err != nil { + return errors.Wrap(err, "read invalid pid from pidfile") + } + process, err := os.FindProcess(pid) + if err != nil { + // error only possible on non-unix hosts + return errors.Wrapf(err, "failed to find process for pid %d from pidfile", pid) + } + defer process.Release() + return process.Signal(syscall.SIGKILL) +} + +// procHandle is to track the process so we can send signals to it +// and handle graceful shutdown. +type procHandle struct { + // this is for the runc process (not the process in-container) + monitorProcess *os.Process + ready chan struct{} + ended chan struct{} + shutdown func() + // this this only used when the request context is canceled and we need + // to kill the in-container process. + killer procKiller +} + +// runcProcessHandle will create a procHandle that will be monitored, where +// on ctx.Done the in-container process will receive a SIGKILL. The returned +// context should be used for the go-runc.(Run|Exec) invocations. The returned +// context will only be canceled in the case where the request context is +// canceled and we are unable to send the SIGKILL to the in-container process. +// The goal is to allow for runc to gracefully shutdown when the request context +// is cancelled. +func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) { + runcCtx, cancel := context.WithCancel(context.Background()) + p := &procHandle{ + ready: make(chan struct{}), + ended: make(chan struct{}), + shutdown: cancel, + killer: killer, + } + // preserve the logger on the context used for the runc process handling + runcCtx = bklog.WithLogger(runcCtx, bklog.G(ctx)) + + go func() { + // Wait for pid + select { + case <-ctx.Done(): + return // nothing to kill + case <-p.ready: + } + + for { + select { + case <-ctx.Done(): + killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) + if err := p.killer.Kill(killCtx); err != nil { + select { + case <-killCtx.Done(): + timeout() + cancel() + return + default: + } + } + timeout() + select { + case <-time.After(50 * time.Millisecond): + case <-p.ended: + return + } + case <-p.ended: + return + } + } + }() + + return p, runcCtx +} + +// Release will free resources with a procHandle. +func (p *procHandle) Release() { + close(p.ended) + if p.monitorProcess != nil { + p.monitorProcess.Release() + } +} + +// Shutdown should be called after the runc process has exited. This will allow +// the signal handling and tty resize loops to exit, terminating the +// goroutines. +func (p *procHandle) Shutdown() { + if p.shutdown != nil { + p.shutdown() + } +} + +// WaitForReady will wait until we have received the runc pid via the go-runc +// Started channel, or until the request context is canceled. This should +// return without errors before attempting to send signals to the runc process. +func (p *procHandle) WaitForReady(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() @@ -487,35 +669,37 @@ func (p *startingProcess) WaitForReady(ctx context.Context) error { } } -// WaitForStart will record the pid reported by Runc via the channel. 
-// We wait for up to 10s for the runc process to start. If the started +// WaitForStart will record the runc pid reported by go-runc via the channel. +// We wait for up to 10s for the runc pid to be reported. If the started // callback is non-nil it will be called after receiving the pid. -func (p *startingProcess) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { +func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) defer timeout() - var err error select { case <-startedCtx.Done(): - return errors.New("runc started message never received") - case pid, ok := <-startedCh: + return errors.New("go-runc started message never received") + case runcPid, ok := <-startedCh: if !ok { - return errors.New("runc process failed to send pid") + return errors.New("go-runc failed to send pid") } if started != nil { started() } - p.Process, err = os.FindProcess(pid) + var err error + p.monitorProcess, err = os.FindProcess(runcPid) if err != nil { - return errors.Wrapf(err, "unable to find runc process for pid %d", pid) + // error only possible on non-unix hosts + return errors.Wrapf(err, "failed to find runc process %d", runcPid) } close(p.ready) } return nil } -// handleSignals will wait until the runcProcess is ready then will -// send each signal received on the channel to the process. -func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-chan syscall.Signal) error { +// handleSignals will wait until the procHandle is ready then will +// send each signal received on the channel to the runc process (not directly +// to the in-container process) +func handleSignals(ctx context.Context, runcProcess *procHandle, signals <-chan syscall.Signal) error { if signals == nil { return nil } @@ -528,8 +712,15 @@ func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <- case <-ctx.Done(): return nil case sig := <-signals: - err := runcProcess.Process.Signal(sig) - if err != nil { + if sig == syscall.SIGKILL { + // never send SIGKILL directly to runc, it needs to go to the + // process in-container + if err := runcProcess.killer.Kill(ctx); err != nil { + return err + } + continue + } + if err := runcProcess.monitorProcess.Signal(sig); err != nil { bklog.G(ctx).Errorf("failed to signal %s to process: %s", sig, err) return err } diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go index 447c4a96b9..28955f9a45 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go @@ -8,6 +8,7 @@ import ( runc "github.com/containerd/go-runc" "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/util/bklog" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -17,15 +18,21 @@ var unsupportedConsoleError = errors.New("tty for runc is only supported on linu func updateRuncFieldsForHostOS(runtime *runc.Runc) {} -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error { if process.Meta.Tty { return unsupportedConsoleError } - return w.commonCall(ctx, id, bundle, process, started, 
func(ctx context.Context, started chan<- int, io runc.IO) error { + extraArgs := []string{} + if keep { + extraArgs = append(extraArgs, "--keep") + } + killer := newRunProcKiller(w.runc, id) + return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ - NoPivot: w.noPivot, - Started: started, - IO: io, + NoPivot: w.noPivot, + Started: started, + IO: io, + ExtraArgs: extraArgs, }) return err }) @@ -35,38 +42,47 @@ func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess if process.Meta.Tty { return unsupportedConsoleError } - return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + + killer, err := newExecProcKiller(w.runc, id) + if err != nil { + return errors.Wrap(err, "failed to initialize process killer") + } + defer killer.Cleanup() + + return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ Started: started, IO: io, + PidFile: pidfile, }) }) } -type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error +type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error // commonCall is the common run/exec logic used for non-linux runtimes. A tty // is only supported for linux, so this really just handles signal propagation // to the started runc process. -func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { - runcProcess := &startingProcess{ - ready: make(chan struct{}), - } +func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error { + runcProcess, ctx := runcProcessHandle(ctx, killer) defer runcProcess.Release() - var eg errgroup.Group - egCtx, cancel := context.WithCancel(ctx) - defer eg.Wait() - defer cancel() + eg, ctx := errgroup.WithContext(ctx) + defer func() { + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + bklog.G(ctx).Errorf("runc process monitoring error: %s", err) + } + }() + defer runcProcess.Shutdown() startedCh := make(chan int, 1) eg.Go(func() error { - return runcProcess.WaitForStart(egCtx, startedCh, started) + return runcProcess.WaitForStart(ctx, startedCh, started) }) eg.Go(func() error { - return handleSignals(egCtx, runcProcess, process.Signal) + return handleSignals(ctx, runcProcess, process.Signal) }) - return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile) } diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go index 15ea812a5a..e2c14950f0 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go @@ -21,50 +21,64 @@ func updateRuncFieldsForHostOS(runtime *runc.Runc) { runtime.PdeathSignal = syscall.SIGKILL // this can still leak the process } -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, 
started func()) error { - return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error { + killer := newRunProcKiller(w.runc, id) + return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { + extraArgs := []string{} + if keep { + extraArgs = append(extraArgs, "--keep") + } _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ - NoPivot: w.noPivot, - Started: started, - IO: io, + NoPivot: w.noPivot, + Started: started, + IO: io, + ExtraArgs: extraArgs, }) return err }) } func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error { - return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + killer, err := newExecProcKiller(w.runc, id) + if err != nil { + return errors.Wrap(err, "failed to initialize process killer") + } + defer killer.Cleanup() + + return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ Started: started, IO: io, + PidFile: pidfile, }) }) } -type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error +type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error -func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { - runcProcess := &startingProcess{ - ready: make(chan struct{}), - } +func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error { + runcProcess, ctx := runcProcessHandle(ctx, killer) defer runcProcess.Release() - var eg errgroup.Group - egCtx, cancel := context.WithCancel(ctx) - defer eg.Wait() - defer cancel() + eg, ctx := errgroup.WithContext(ctx) + defer func() { + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + bklog.G(ctx).Errorf("runc process monitoring error: %s", err) + } + }() + defer runcProcess.Shutdown() startedCh := make(chan int, 1) eg.Go(func() error { - return runcProcess.WaitForStart(egCtx, startedCh, started) + return runcProcess.WaitForStart(ctx, startedCh, started) }) eg.Go(func() error { - return handleSignals(egCtx, runcProcess, process.Signal) + return handleSignals(ctx, runcProcess, process.Signal) }) if !process.Meta.Tty { - return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile) } ptm, ptsName, err := console.NewPty() @@ -84,7 +98,7 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces } pts.Close() ptm.Close() - cancel() // this will shutdown resize and signal loops + runcProcess.Shutdown() err := eg.Wait() if err != nil { bklog.G(ctx).Warningf("error while shutting down tty io: %s", err) @@ -119,13 +133,13 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces } eg.Go(func() error { - err := runcProcess.WaitForReady(egCtx) + err := runcProcess.WaitForReady(ctx) if err 
!= nil { return err } for { select { - case <-egCtx.Done(): + case <-ctx.Done(): return nil case resize := <-process.Resize: err = ptm.Resize(console.WinSize{ @@ -135,7 +149,9 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces if err != nil { bklog.G(ctx).Errorf("failed to resize ptm: %s", err) } - err = runcProcess.Process.Signal(signal.SIGWINCH) + // SIGWINCH must be sent to the runc monitor process, as + // terminal resizing is done in runc. + err = runcProcess.monitorProcess.Signal(signal.SIGWINCH) if err != nil { bklog.G(ctx).Errorf("failed to send SIGWINCH to process: %s", err) } @@ -154,5 +170,5 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces runcIO.stderr = pts } - return call(ctx, startedCh, runcIO) + return call(ctx, startedCh, runcIO, killer.pidfile) } diff --git a/vendor/github.com/moby/buildkit/executor/stubs.go b/vendor/github.com/moby/buildkit/executor/stubs.go index 22a8ac1310..e2ac460e20 100644 --- a/vendor/github.com/moby/buildkit/executor/stubs.go +++ b/vendor/github.com/moby/buildkit/executor/stubs.go @@ -1,17 +1,18 @@ package executor import ( + "context" "errors" "os" "path/filepath" "syscall" "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/system" - "github.com/sirupsen/logrus" ) -func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { +func MountStubsCleaner(ctx context.Context, dir string, mounts []Mount, recursive bool) func() { names := []string{"/etc/resolv.conf", "/etc/hosts"} for _, m := range mounts { @@ -72,23 +73,23 @@ func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { dir := filepath.Dir(p) dirSt, err := os.Stat(dir) if err != nil { - logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) + bklog.G(ctx).WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) continue } mtime := dirSt.ModTime() atime, err := system.Atime(dirSt) if err != nil { - logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) + bklog.G(ctx).WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) atime = mtime } if err := os.Remove(p); err != nil { - logrus.WithError(err).Warnf("Failed to remove mount stub %q", p) + bklog.G(ctx).WithError(err).Warnf("Failed to remove mount stub %q", p) } // Restore the timestamps of the dir if err := os.Chtimes(dir, atime, mtime); err != nil { - logrus.WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) + bklog.G(ctx).WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) } } } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go index a41c6039f0..1c4837e36f 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go @@ -20,11 +20,11 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" spdx_json "github.com/spdx/tools-golang/json" - "github.com/spdx/tools-golang/spdx/common" - spdx "github.com/spdx/tools-golang/spdx/v2_3" + "github.com/spdx/tools-golang/spdx" + "github.com/spdx/tools-golang/spdx/v2/common" ) -var intotoPlatform ocispecs.Platform = ocispecs.Platform{ +var intotoPlatform = 
ocispecs.Platform{ Architecture: "unknown", OS: "unknown", } @@ -122,7 +122,7 @@ func supplementSBOM(ctx context.Context, s session.Group, target cache.Immutable } func decodeSPDX(dt []byte) (s *spdx.Document, err error) { - doc, err := spdx_json.Load2_3(bytes.NewReader(dt)) + doc, err := spdx_json.Read(bytes.NewReader(dt)) if err != nil { return nil, errors.Wrap(err, "unable to decode spdx") } @@ -134,7 +134,7 @@ func decodeSPDX(dt []byte) (s *spdx.Document, err error) { func encodeSPDX(s *spdx.Document) (dt []byte, err error) { w := bytes.NewBuffer(nil) - err = spdx_json.Save2_3(s, w) + err = spdx_json.Write(s, w) if err != nil { return nil, errors.Wrap(err, "unable to encode spdx") } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go index 55eaf3ff58..2c2775ac7e 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go @@ -5,19 +5,18 @@ import ( "encoding/base64" "encoding/json" "fmt" + "sort" "strconv" "strings" - "time" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/pkg/epoch" "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/rootfs" - intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -33,17 +32,10 @@ import ( "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) const ( - keyPush = "push" - keyPushByDigest = "push-by-digest" - keyInsecure = "registry.insecure" - keyUnpack = "unpack" - keyDanglingPrefix = "dangling-name-prefix" - keyNameCanonical = "name-canonical" - keyStore = "store" - // keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting image to the image store // as well as lacking some blobs in the content store. Some integration tests for lazyref behaviour depends on this option. // Ignored when store=false. 
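// A minimal sketch of how a client might supply image exporter attributes once
// the string constants removed above are superseded by the typed
// ImageExporterOptKey constants added later in this series
// (exporter/containerimage/exptypes/keys.go); the attribute values stay plain
// strings. The image reference below is hypothetical.
package main

import "github.com/moby/buildkit/exporter/containerimage/exptypes"

func exporterAttrs() map[string]string {
	return map[string]string{
		string(exptypes.OptKeyName):             "example/image:dev", // hypothetical name
		string(exptypes.OptKeyPush):             "true",
		string(exptypes.OptKeyOCITypes):         "true",
		string(exptypes.OptKeyLayerCompression): "zstd",
	}
}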
@@ -78,20 +70,19 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp RefCfg: cacheconfig.RefConfig{ Compression: compression.New(compression.Default), }, - BuildInfo: true, ForceInlineAttestations: true, }, store: true, } - opt, err := i.opts.Load(opt) + opt, err := i.opts.Load(ctx, opt) if err != nil { return nil, err } for k, v := range opt { - switch k { - case keyPush: + switch exptypes.ImageExporterOptKey(k) { + case exptypes.OptKeyPush: if v == "" { i.push = true continue @@ -101,7 +92,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.push = b - case keyPushByDigest: + case exptypes.OptKeyPushByDigest: if v == "" { i.pushByDigest = true continue @@ -111,7 +102,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.pushByDigest = b - case keyInsecure: + case exptypes.OptKeyInsecure: if v == "" { i.insecure = true continue @@ -121,7 +112,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.insecure = b - case keyUnpack: + case exptypes.OptKeyUnpack: if v == "" { i.unpack = true continue @@ -131,7 +122,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.unpack = b - case keyStore: + case exptypes.OptKeyStore: if v == "" { i.store = true continue @@ -151,9 +142,9 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.storeAllowIncomplete = b - case keyDanglingPrefix: + case exptypes.OptKeyDanglingPrefix: i.danglingPrefix = v - case keyNameCanonical: + case exptypes.OptKeyNameCanonical: if v == "" { i.nameCanonical = true continue @@ -247,60 +238,73 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source for _, targetName := range targetNames { if e.opt.Images != nil && e.store { tagDone := progress.OneOff(ctx, "naming to "+targetName) - img := images.Image{ - Target: *desc, - CreatedAt: time.Now(), + + // imageClientCtx is used for propagating the epoch to e.opt.Images.Update() and e.opt.Images.Create(). + // + // Ideally, we should be able to propagate the epoch via images.Image.CreatedAt. + // However, due to a bug of containerd, we are temporarily stuck with this workaround. + // https://github.com/containerd/containerd/issues/8322 + imageClientCtx := ctx + if e.opts.Epoch != nil { + imageClientCtx = epoch.WithSourceDateEpoch(imageClientCtx, e.opts.Epoch) } + img := images.Image{ + Target: *desc, + // CreatedAt in images.Images is ignored due to a bug of containerd. + // See the comment lines for imageClientCtx. 
+ } + sfx := []string{""} if nameCanonical { sfx = append(sfx, "@"+desc.Digest.String()) } for _, sfx := range sfx { img.Name = targetName + sfx - if _, err := e.opt.Images.Update(ctx, img); err != nil { + if _, err := e.opt.Images.Update(imageClientCtx, img); err != nil { if !errors.Is(err, errdefs.ErrNotFound) { return nil, nil, tagDone(err) } - if _, err := e.opt.Images.Create(ctx, img); err != nil { + if _, err := e.opt.Images.Create(imageClientCtx, img); err != nil { return nil, nil, tagDone(err) } } } tagDone(nil) - if src.Ref != nil && e.unpack { + if e.unpack { if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { return nil, nil, err } } if !e.storeAllowIncomplete { + var refs []cache.ImmutableRef if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return nil, nil, err - } - remote := remotes[0] - if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err - } - } + refs = append(refs, src.Ref) } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range src.Refs { + refs = append(refs, ref) + } + eg, ctx := errgroup.WithContext(ctx) + for _, ref := range refs { + ref := ref + eg.Go(func() error { + remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, nil, err + return err } remote := remotes[0] if unlazier, ok := remote.Provider.(cache.Unlazier); ok { if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err + return err } } - } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, nil, err } } } @@ -330,10 +334,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source } func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error { + var refs []cache.ImmutableRef + if src.Ref != nil { + refs = append(refs, src.Ref) + } + for _, ref := range src.Refs { + refs = append(refs, ref) + } + annotations := map[digest.Digest]map[string]string{} mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) - if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range refs { + remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { return err } @@ -343,25 +355,36 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou addAnnotations(annotations, desc) } } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return err - } - remote := remotes[0] - for _, desc := range remote.Descriptors { - mprovider.Add(desc.Digest, remote.Provider) - addAnnotations(annotations, desc) - } - } - } - - ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations) } func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) { + matcher := 
platforms.Only(platforms.Normalize(platforms.DefaultSpec())) + + ps, err := exptypes.ParsePlatforms(src.Metadata) + if err != nil { + return err + } + matching := []exptypes.Platform{} + for _, p2 := range ps.Platforms { + if matcher.Match(p2.Platform) { + matching = append(matching, p2) + } + } + if len(matching) == 0 { + // current platform was not found, so skip unpacking + return nil + } + sort.SliceStable(matching, func(i, j int) bool { + return matcher.Less(matching[i].Platform, matching[j].Platform) + }) + + ref, _ := src.FindRef(matching[0].ID) + if ref == nil { + // ref has no layers, so nothing to unpack + return nil + } + unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name) defer func() { unpackDone(err0) @@ -379,16 +402,7 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag return err } - topLayerRef := src.Ref - if len(src.Refs) > 0 { - if r, ok := src.Refs[defaultPlatform()]; ok { - topLayerRef = r - } else { - return errors.Errorf("no reference for default platform %s", defaultPlatform()) - } - } - - remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s) + remotes, err := ref.GetRemotes(ctx, true, e.opts.RefCfg, false, s) if err != nil { return err } @@ -461,12 +475,6 @@ func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descrip } } -func defaultPlatform() string { - // Use normalized platform string to avoid the mismatch with platform options which - // are normalized using platforms.Normalize() - return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) -} - func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference { return &descriptorReference{ desc: desc, diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go new file mode 100644 index 0000000000..c432218499 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go @@ -0,0 +1,75 @@ +package exptypes + +import commonexptypes "github.com/moby/buildkit/exporter/exptypes" + +type ImageExporterOptKey string + +// Options keys supported by the image exporter output. +var ( + // Name of the image. + // Value: string + OptKeyName ImageExporterOptKey = "name" + + // Push after creating image. + // Value: bool + OptKeyPush ImageExporterOptKey = "push" + + // Push unnamed image. + // Value: bool + OptKeyPushByDigest ImageExporterOptKey = "push-by-digest" + + // Allow pushing to insecure HTTP registry. + // Value: bool + OptKeyInsecure ImageExporterOptKey = "registry.insecure" + + // Unpack image after it's created (containerd). + // Value: bool + OptKeyUnpack ImageExporterOptKey = "unpack" + + // Fallback image name prefix if image name isn't provided. + // If used, image will be named as @ + // Value: string + OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix" + + // Creates additional image name with format @ + // Value: bool + OptKeyNameCanonical ImageExporterOptKey = "name-canonical" + + // Store the resulting image along with all of the content it references. + // Ignored if the worker doesn't have image store (e.g. OCI worker). + // Value: bool + OptKeyStore ImageExporterOptKey = "store" + + // Use OCI mediatypes instead of Docker in JSON configs. + // Value: bool + OptKeyOCITypes ImageExporterOptKey = "oci-mediatypes" + + // Force attestation to be attached. 
+ // Value: bool + OptKeyForceInlineAttestations ImageExporterOptKey = "attestation-inline" + + // Mark layers as non-distributable if they are found to use a + // non-distributable media type. When this option is not set, the exporter + // will change the media type of the layer to a distributable one. + // Value: bool + OptKeyPreferNondistLayers ImageExporterOptKey = "prefer-nondist-layers" + + // Clamp produced timestamps. For more information see the + // SOURCE_DATE_EPOCH specification. + // Value: int (number of seconds since Unix epoch) + OptKeySourceDateEpoch ImageExporterOptKey = ImageExporterOptKey(commonexptypes.OptKeySourceDateEpoch) + + // Compression type for newly created and cached layers. + // estargz should be used with OptKeyOCITypes set to true. + // Value: string + OptKeyLayerCompression ImageExporterOptKey = "compression" + + // Force compression on all (including existing) layers. + // Value: bool + OptKeyForceCompression ImageExporterOptKey = "force-compression" + + // Compression level + // Value: int (0-9) for gzip and estargz + // Value: int (0-22) for zstd + OptKeyCompressionLevel ImageExporterOptKey = "compression-level" +) diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index 4531360afa..c4d5721ea6 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -11,9 +11,7 @@ const ( ExporterImageConfigDigestKey = "containerimage.config.digest" ExporterImageDescriptorKey = "containerimage.descriptor" ExporterInlineCache = "containerimage.inlinecache" - ExporterBuildInfo = "containerimage.buildinfo" // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md ExporterPlatformsKey = "refs.platforms" - ExporterEpochKey = "source.date.epoch" ) // KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by @@ -21,7 +19,6 @@ const ( var KnownRefMetadataKeys = []string{ ExporterImageConfigKey, ExporterInlineCache, - ExporterBuildInfo, } type Platforms struct { diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go index a35d811d55..1af194b506 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go @@ -19,9 +19,10 @@ type HealthConfig struct { Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. 
+ StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. @@ -33,7 +34,6 @@ type ImageConfig struct { ocispecs.ImageConfig Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) // NetworkDisabled bool `json:",omitempty"` // Is network disabled // MacAddress string `json:",omitempty"` // Mac Address of the container diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go index 4948eaad24..791f268afd 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go @@ -1,30 +1,16 @@ package containerimage import ( + "context" "strconv" "time" cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/exporter/util/epoch" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - keyImageName = "name" - keyLayerCompression = "compression" - keyCompressionLevel = "compression-level" - keyForceCompression = "force-compression" - keyOCITypes = "oci-mediatypes" - keyBuildInfo = "buildinfo" - keyBuildInfoAttrs = "buildinfo-attrs" - keyForceInlineAttestations = "attestation-inline" - - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. - // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
- keyPreferNondistLayers = "prefer-nondist-layers" ) type ImageCommitOpts struct { @@ -35,12 +21,9 @@ type ImageCommitOpts struct { Epoch *time.Time ForceInlineAttestations bool // force inline attestations to be attached - - BuildInfo bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md - BuildInfoAttrs bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md } -func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) { +func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[string]string, error) { rest := make(map[string]string) as, optb, err := ParseAnnotations(toBytesMap(opt)) @@ -54,32 +37,20 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) return nil, err } + if c.RefCfg.Compression, err = compression.ParseAttributes(opt); err != nil { + return nil, err + } + for k, v := range opt { var err error - switch k { - case keyImageName: + switch exptypes.ImageExporterOptKey(k) { + case exptypes.OptKeyName: c.ImageName = v - case keyLayerCompression: - c.RefCfg.Compression.Type, err = compression.Parse(v) - case keyCompressionLevel: - ii, err2 := strconv.ParseInt(v, 10, 64) - if err != nil { - err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k) - break - } - v := int(ii) - c.RefCfg.Compression.Level = &v - case keyForceCompression: - err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true) - case keyOCITypes: + case exptypes.OptKeyOCITypes: err = parseBoolWithDefault(&c.OCITypes, k, v, true) - case keyBuildInfo: - err = parseBoolWithDefault(&c.BuildInfo, k, v, true) - case keyBuildInfoAttrs: - err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false) - case keyForceInlineAttestations: + case exptypes.OptKeyForceInlineAttestations: err = parseBool(&c.ForceInlineAttestations, k, v) - case keyPreferNondistLayers: + case exptypes.OptKeyPreferNondistLayers: err = parseBool(&c.RefCfg.PreferNonDistributable, k, v) default: rest[k] = v @@ -91,11 +62,11 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) } if c.RefCfg.Compression.Type.OnlySupportOCITypes() { - c.EnableOCITypes(c.RefCfg.Compression.Type.String()) + c.EnableOCITypes(ctx, c.RefCfg.Compression.Type.String()) } if c.RefCfg.Compression.Type.NeedsForceCompression() { - c.EnableForceCompression(c.RefCfg.Compression.Type.String()) + c.EnableForceCompression(ctx, c.RefCfg.Compression.Type.String()) } c.Annotations = c.Annotations.Merge(as) @@ -103,25 +74,25 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) return rest, nil } -func (c *ImageCommitOpts) EnableOCITypes(reason string) { +func (c *ImageCommitOpts) EnableOCITypes(ctx context.Context, reason string) { if !c.OCITypes { message := "forcibly turning on oci-mediatype mode" if reason != "" { message += " for " + reason } - logrus.Warn(message) + bklog.G(ctx).Warn(message) c.OCITypes = true } } -func (c *ImageCommitOpts) EnableForceCompression(reason string) { +func (c *ImageCommitOpts) EnableForceCompression(ctx context.Context, reason string) { if !c.RefCfg.Compression.Force { message := "forcibly turning on force-compression mode" if reason != "" { message += " for " + reason } - logrus.Warn(message) + bklog.G(ctx).Warn(message) c.RefCfg.Compression.Force = true } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go 
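// A minimal sketch of driving the reworked ImageCommitOpts.Load shown above,
// under the assumption that commit-level keys are consumed and unrecognized
// keys such as "push" are returned for the exporter's Resolve to handle. The
// attribute values are illustrative.
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage"
)

func loadCommitOpts(ctx context.Context) error {
	var opts containerimage.ImageCommitOpts
	rest, err := opts.Load(ctx, map[string]string{
		"name":           "example/image:dev", // hypothetical reference
		"oci-mediatypes": "true",
		"push":           "true", // not a commit option; expected back in rest
	})
	if err != nil {
		return err
	}
	fmt.Println("options left for the exporter:", rest)
	return nil
}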
b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go index 4cccd9db51..186f415b18 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go @@ -26,8 +26,6 @@ import ( "github.com/moby/buildkit/solver/result" attestationTypes "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/purl" @@ -36,6 +34,7 @@ import ( digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/package-url/packageurl-go" "github.com/pkg/errors" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -102,7 +101,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 { - opts.EnableOCITypes("annotations") + opts.EnableOCITypes(ctx, "annotations") } } @@ -127,15 +126,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session return nil, err } - var dtbi []byte - if opts.BuildInfo { - if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ - RemoveAttrs: !opts.BuildInfoAttrs, - }); err != nil { - return nil, err - } - } - annotations := opts.Annotations.Platform(nil) if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 { return nil, errors.Errorf("index annotations not supported for single platform export") @@ -143,7 +133,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) - mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -159,7 +149,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } if len(inp.Attestations) > 0 { - opts.EnableOCITypes("attestations") + opts.EnableOCITypes(ctx, "attestations") } refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) @@ -178,19 +168,11 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session return nil, err } - idx := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. 
- MediaType string `json:"mediaType,omitempty"` - - ocispecs.Index - }{ - MediaType: ocispecs.MediaTypeImageIndex, - Index: ocispecs.Index{ - Annotations: opts.Annotations.Platform(nil).Index, - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, + idx := ocispecs.Index{ + MediaType: ocispecs.MediaTypeImageIndex, + Annotations: opts.Annotations.Platform(nil).Index, + Versioned: specs.Versioned{ + SchemaVersion: 2, }, } @@ -210,15 +192,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) - var dtbi []byte - if opts.BuildInfo { - if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ - RemoveAttrs: !opts.BuildInfoAttrs, - }); err != nil { - return nil, err - } - } - remote := &remotes[remotesMap[p.ID]] if remote == nil { remote = &solver.Remote{ @@ -226,7 +199,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } - desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -263,7 +236,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session if name == "" { continue } - pl, err := purl.RefToPURL(name, &p.Platform) + pl, err := purl.RefToPURL(packageurl.TypeDocker, name, &p.Platform) if err != nil { return nil, err } @@ -350,7 +323,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC return out, err } -func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { if len(config) == 0 { var err error config, err = defaultImageConfig() @@ -369,7 +342,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima return nil, nil, err } - config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch) + config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, epoch) if err != nil { return nil, nil, err } @@ -386,24 +359,16 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima configType = images.MediaTypeDockerSchema2Config } - mfst := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. 
- MediaType string `json:"mediaType,omitempty"` - - ocispecs.Manifest - }{ - MediaType: manifestType, - Manifest: ocispecs.Manifest{ - Annotations: annotations.Manifest, - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: ocispecs.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - }, + mfst := ocispecs.Manifest{ + MediaType: manifestType, + Annotations: annotations.Manifest, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, }, } @@ -411,9 +376,10 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima "containerd.io/gc.ref.content.0": configDigest.String(), } - for _, desc := range remote.Descriptors { + for i, desc := range remote.Descriptors { desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() } mfstJSON, err := json.MarshalIndent(mfst, "", " ") @@ -473,7 +439,7 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima } digest := digest.FromBytes(data) desc := ocispecs.Descriptor{ - MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType, + MediaType: intoto.PayloadType, Digest: digest, Size: int64(len(data)), Annotations: map[string]string{ @@ -499,23 +465,15 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima MediaType: configType, } - mfst := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. - MediaType string `json:"mediaType,omitempty"` - - ocispecs.Manifest - }{ + mfst := ocispecs.Manifest{ MediaType: manifestType, - Manifest: ocispecs.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: ocispecs.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - }, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, }, } @@ -610,7 +568,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { return config.History, nil } -func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) { +func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, epoch *time.Time) ([]byte, error) { m := map[string]json.RawMessage{} if err := json.Unmarshal(dt, &m); err != nil { return nil, errors.Wrap(err, "failed to parse image config for patch") @@ -678,16 +636,6 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs m["moby.buildkit.cache.v0"] = dt } - if buildInfo != nil { - dt, err := json.Marshal(buildInfo) - if err != nil { - return nil, err - } - m[binfotypes.ImageConfigField] = dt - } else { - delete(m, binfotypes.ImageConfigField) - } - dt, err = json.Marshal(m) return dt, errors.Wrap(err, "failed to marshal config after patch") } @@ -774,7 +722,7 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo } // convert between oci and docker media types (or vice versa) if needed - remote.Descriptors = compression.ConvertAllLayerMediaTypes(oci, remote.Descriptors...) + remote.Descriptors = compression.ConvertAllLayerMediaTypes(ctx, oci, remote.Descriptors...) 
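For illustration, a minimal sketch of the garbage-collection labelling that commitDistributionManifest now performs earlier in this writer.go diff, assuming only a config digest and a slice of layer descriptors (the helper name gcLabels is hypothetical): index 0 pins the image config blob and index i+1 pins each layer, so containerd's GC keeps every blob the manifest references.

package example

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// gcLabels mirrors the labelling scheme used above: the config blob is
// referenced at index 0 and each layer descriptor at index i+1.
func gcLabels(configDigest digest.Digest, layers []ocispecs.Descriptor) map[string]string {
	labels := map[string]string{
		"containerd.io/gc.ref.content.0": configDigest.String(),
	}
	for i, desc := range layers {
		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String()
	}
	return labels
}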
return remote, history } diff --git a/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go b/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go new file mode 100644 index 0000000000..4b568154ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go @@ -0,0 +1,15 @@ +package exptypes + +const ( + ExporterEpochKey = "source.date.epoch" +) + +type ExporterOptKey string + +// Options keys supported by all exporters. +var ( + // Clamp produced timestamps. For more information see the + // SOURCE_DATE_EPOCH specification. + // Value: int (number of seconds since Unix epoch) + OptKeySourceDateEpoch ExporterOptKey = "source-date-epoch" +) diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go index 7d08b172e0..771b7aaf22 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -4,6 +4,7 @@ import ( "context" "os" "strings" + "sync" "time" "github.com/moby/buildkit/cache" @@ -20,10 +21,6 @@ import ( "golang.org/x/time/rate" ) -const ( - keyAttestationPrefix = "attestation-prefix" -) - type Opt struct { SessionManager *session.Manager } @@ -39,23 +36,12 @@ func New(opt Opt) (exporter.Exporter, error) { } func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - tm, _, err := epoch.ParseExporterAttrs(opt) - if err != nil { - return nil, err - } - i := &localExporterInstance{ localExporter: e, - opts: CreateFSOpts{ - Epoch: tm, - }, } - - for k, v := range opt { - switch k { - case keyAttestationPrefix: - i.opts.AttestationPrefix = v - } + _, err := i.opts.Load(opt) + if err != nil { + return nil, err } return i, nil @@ -107,6 +93,9 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source now := time.Now().Truncate(time.Second) + visitedPath := map[string]string{} + var visitedMu sync.Mutex + export := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error { return func() error { outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) @@ -117,20 +106,43 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source defer cleanup() } + if !e.opts.PlatformSplit { + // check for duplicate paths + err = outputFS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { + if fi.IsDir() { + return nil + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + visitedMu.Lock() + defer visitedMu.Unlock() + if vp, ok := visitedPath[p]; ok { + return errors.Errorf("cannot overwrite %s from %s with %s when split option is disabled", p, vp, k) + } + visitedPath[p] = k + return nil + }) + if err != nil { + return err + } + } + lbl := "copying files" if isMap { lbl += " " + k - st := fstypes.Stat{ - Mode: uint32(os.ModeDir | 0755), - Path: strings.Replace(k, "/", "_", -1), - } - if e.opts.Epoch != nil { - st.ModTime = e.opts.Epoch.UnixNano() - } - - outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) - if err != nil { - return err + if e.opts.PlatformSplit { + st := fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() + } + outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) + if err != nil { + return err + } } } diff --git a/vendor/github.com/moby/buildkit/exporter/local/fs.go 
b/vendor/github.com/moby/buildkit/exporter/local/fs.go index c5a524aae3..d8e4703ac1 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/fs.go +++ b/vendor/github.com/moby/buildkit/exporter/local/fs.go @@ -3,11 +3,13 @@ package local import ( "context" "encoding/json" + "fmt" "io" "io/fs" "os" "path" "strconv" + "strings" "time" "github.com/docker/docker/pkg/idtools" @@ -15,6 +17,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/attestation" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/result" @@ -25,9 +28,45 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) +const ( + keyAttestationPrefix = "attestation-prefix" + // keyPlatformSplit is an exporter option which can be used to split result + // in subfolders when multiple platform references are exported. + keyPlatformSplit = "platform-split" +) + type CreateFSOpts struct { Epoch *time.Time AttestationPrefix string + PlatformSplit bool +} + +func (c *CreateFSOpts) Load(opt map[string]string) (map[string]string, error) { + rest := make(map[string]string) + c.PlatformSplit = true + + var err error + c.Epoch, opt, err = epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + + for k, v := range opt { + switch k { + case keyAttestationPrefix: + c.AttestationPrefix = v + case keyPlatformSplit: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value for %s: %s", keyPlatformSplit, v) + } + c.PlatformSplit = b + default: + rest[k] = v + } + } + + return rest, nil } func CreateFS(ctx context.Context, sessionID string, k string, ref cache.ImmutableRef, attestations []exporter.Attestation, defaultTime time.Time, opt CreateFSOpts) (fsutil.FS, func() error, error) { @@ -138,6 +177,11 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab } name := opt.AttestationPrefix + path.Base(attestations[i].Path) + if !opt.PlatformSplit { + nameExt := path.Ext(name) + namBase := strings.TrimSuffix(name, nameExt) + name = fmt.Sprintf("%s.%s%s", namBase, strings.Replace(k, "/", "_", -1), nameExt) + } if _, ok := names[name]; ok { return nil, nil, errors.Errorf("duplicate attestation path name %s", name) } diff --git a/vendor/github.com/moby/buildkit/exporter/oci/export.go b/vendor/github.com/moby/buildkit/exporter/oci/export.go index 60982f4daf..81ac7857de 100644 --- a/vendor/github.com/moby/buildkit/exporter/oci/export.go +++ b/vendor/github.com/moby/buildkit/exporter/oci/export.go @@ -11,9 +11,7 @@ import ( archiveexporter "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/leases" - "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" - intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -29,6 +27,7 @@ import ( "github.com/moby/buildkit/util/progress" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" ) @@ -67,12 +66,11 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp RefCfg: cacheconfig.RefConfig{ Compression: compression.New(compression.Default), }, - BuildInfo: true, - OCITypes: e.opt.Variant == VariantOCI, + OCITypes: e.opt.Variant == VariantOCI, }, } - opt, err := 
i.opts.Load(opt) + opt, err := i.opts.Load(ctx, opt) if err != nil { return nil, err } @@ -208,40 +206,36 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source return nil, nil, err } - mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + var refs []cache.ImmutableRef if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return nil, nil, err - } - remote := remotes[0] - // unlazy before tar export as the tar writer does not handle - // layer blobs in parallel (whereas unlazy does) - if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err - } - } - for _, desc := range remote.Descriptors { - mprovider.Add(desc.Digest, remote.Provider) - } + refs = append(refs, src.Ref) } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range src.Refs { + refs = append(refs, ref) + } + eg, egCtx := errgroup.WithContext(ctx) + mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + for _, ref := range refs { + ref := ref + eg.Go(func() error { + remotes, err := ref.GetRemotes(egCtx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, nil, err + return err } remote := remotes[0] if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err + if err := unlazier.Unlazy(egCtx); err != nil { + return err } } for _, desc := range remote.Descriptors { mprovider.Add(desc.Digest, remote.Provider) } - } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, nil, err } if e.tar { @@ -267,7 +261,6 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source } report(nil) } else { - ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") store := sessioncontent.NewCallerStore(caller, "export") if err != nil { return nil, nil, err diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go b/vendor/github.com/moby/buildkit/exporter/tar/export.go index 4d136c89c1..7259f6b24a 100644 --- a/vendor/github.com/moby/buildkit/exporter/tar/export.go +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -3,7 +3,6 @@ package local import ( "context" "os" - "strconv" "strings" "time" @@ -20,15 +19,6 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) -const ( - attestationPrefixKey = "attestation-prefix" - - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. - // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
- preferNondistLayersKey = "prefer-nondist-layers" -) - type Opt struct { SessionManager *session.Manager } @@ -45,33 +35,18 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { li := &localExporterInstance{localExporter: e} - - tm, opt, err := epoch.ParseExporterAttrs(opt) + _, err := li.opts.Load(opt) if err != nil { return nil, err } - li.opts.Epoch = tm - - for k, v := range opt { - switch k { - case preferNondistLayersKey: - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) - } - li.preferNonDist = b - case attestationPrefixKey: - li.opts.AttestationPrefix = v - } - } + _ = opt return li, nil } type localExporterInstance struct { *localExporter - opts local.CreateFSOpts - preferNonDist bool + opts local.CreateFSOpts } func (e *localExporterInstance) Name() string { diff --git a/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go index 63f806e1b7..d5a0146081 100644 --- a/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go +++ b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go @@ -5,14 +5,12 @@ import ( "time" "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/exporter/containerimage/exptypes" + commonexptypes "github.com/moby/buildkit/exporter/exptypes" "github.com/pkg/errors" ) const ( frontendSourceDateEpochArg = "build-arg:SOURCE_DATE_EPOCH" - - KeySourceDateEpoch = "source-date-epoch" ) func ParseBuildArgs(opt map[string]string) (string, bool) { @@ -27,7 +25,7 @@ func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, e for k, v := range opt { switch k { - case KeySourceDateEpoch: + case string(commonexptypes.OptKeySourceDateEpoch): var err error tm, err = parseTime(k, v) if err != nil { @@ -42,7 +40,7 @@ func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, e } func ParseSource(inp *exporter.Source) (*time.Time, bool, error) { - if v, ok := inp.Metadata[exptypes.ExporterEpochKey]; ok { + if v, ok := inp.Metadata[commonexptypes.ExporterEpochKey]; ok { epoch, err := parseTime("", string(v)) if err != nil { return nil, false, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH from frontend: %q", v) diff --git a/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go index 113797b213..c52229c284 100644 --- a/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go +++ b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go @@ -31,14 +31,14 @@ const ( // build-contexts or multi-stage builds. Handling these separately allows the // scanner to optionally ignore these or to mark them as such in the // attestation. 
-type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) +type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error) -func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string) (Scanner, error) { +func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string, resolveOpt llb.ResolveImageConfigOpt) (Scanner, error) { if scanner == "" { return nil, nil } - _, dt, err := resolver.ResolveImageConfig(ctx, scanner, llb.ResolveImageConfigOpt{}) + scanner, _, dt, err := resolver.ResolveImageConfig(ctx, scanner, resolveOpt) if err != nil { return nil, err } @@ -55,7 +55,7 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan return nil, errors.Errorf("scanner %s does not have cmd", scanner) } - return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) { + return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error) { var env []string env = append(env, cfg.Config.Env...) env = append(env, "BUILDKIT_SCAN_DESTINATION="+outDir) @@ -86,9 +86,9 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan } stsbom := runscan.AddMount(outDir, llb.Scratch()) - return result.Attestation[llb.State]{ + return result.Attestation[*llb.State]{ Kind: gatewaypb.AttestationKindBundle, - Ref: stsbom, + Ref: &stsbom, Metadata: map[string][]byte{ result.AttestationReasonKey: []byte(result.AttestationReasonSBOM), result.AttestationSBOMCore: []byte(CoreSBOMName), @@ -100,7 +100,7 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan }, nil } -func HasSBOM[T any](res *result.Result[T]) bool { +func HasSBOM[T comparable](res *result.Result[T]) bool { for _, as := range res.Attestations { for _, a := range as { if a.InToto.PredicateType == intoto.PredicateSPDX { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go index aafd9c9a73..40ab3de2c0 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -1,31 +1,18 @@ package builder import ( - "archive/tar" - "bytes" "context" - "encoding/csv" - "encoding/json" - "fmt" - "net" - "path" - "regexp" - "strconv" "strings" - "time" + "sync" "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/go-units" - controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/frontend/attestations/sbom" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerui" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" 
"github.com/moby/buildkit/frontend/subrequests/outline" @@ -33,352 +20,32 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/result" - "github.com/moby/buildkit/util/gitutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" ) const ( - DefaultLocalNameContext = "context" - DefaultLocalNameDockerfile = "dockerfile" - defaultDockerfileName = "Dockerfile" - dockerignoreFilename = ".dockerignore" - - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - contextPrefix = "context:" - inputMetadataPrefix = "input-metadata:" - - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCgroupParent = "cgroup-parent" - keyContextSubDir = "contextsubdir" - keyForceNetwork = "force-network-mode" - keyGlobalAddHosts = "add-hosts" - keyHostname = "hostname" - keyImageResolveMode = "image-resolve-mode" - keyMultiPlatform = "multi-platform" - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyNoCache = "no-cache" - keyShmSize = "shm-size" - keyTargetPlatform = "platform" - keyUlimit = "ulimit" - keyRequestID = "requestid" - // Don't forget to update frontend documentation if you add // a new build-arg: frontend/dockerfile/docs/reference.md - keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" - keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" - keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" - keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" - keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" - keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" + keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" ) -var httpPrefix = regexp.MustCompile(`^https?://`) - func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { - opts := c.BuildOpts().Opts - caps := c.BuildOpts().LLBCaps - gwcaps := c.BuildOpts().Caps - - if err := caps.Supports(pb.CapFileBase); err != nil { - return nil, errors.Wrap(err, "needs BuildKit 0.5 or later") + bc, err := dockerui.NewClient(c) + if err != nil { + return nil, err } - if opts["override-copy-image"] != "" { - return nil, errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") - } - if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil && b { - return nil, errors.New("support for \"build-arg:BUILDKIT_DISABLE_FILEOP\" was removed in BuildKit 0.11") - } - } - + opts := bc.BuildOpts().Opts allowForward, capsError := validateCaps(opts["frontend.caps"]) if !allowForward && capsError != nil { return nil, capsError } - marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} - - localNameContext := DefaultLocalNameContext - if v, ok := opts[keyNameContext]; ok { - localNameContext = v - } - - forceLocalDockerfile := false - localNameDockerfile := DefaultLocalNameDockerfile - if v, ok := opts[keyNameDockerfile]; ok { - forceLocalDockerfile = true - localNameDockerfile = v - } - - defaultBuildPlatform := platforms.DefaultSpec() - if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { - defaultBuildPlatform = workers[0].Platforms[0] - } - - buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} - targetPlatforms := []*ocispecs.Platform{nil} - if v := opts[keyTargetPlatform]; v != "" { - var err error - 
targetPlatforms, err = parsePlatforms(v) - if err != nil { - return nil, err - } - } - - resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) + src, err := bc.ReadEntrypoint(ctx, "Dockerfile") if err != nil { return nil, err } - extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse additional hosts") - } - - shmSize, err := parseShmSize(opts[keyShmSize]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse shm size") - } - - ulimit, err := parseUlimits(opts[keyUlimit]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse ulimit") - } - - defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) - if err != nil { - return nil, err - } - - filename := opts[keyFilename] - if filename == "" { - filename = defaultDockerfileName - } - - var ignoreCache []string - if v, ok := opts[keyNoCache]; ok { - if v == "" { - ignoreCache = []string{} // means all stages - } else { - ignoreCache = strings.Split(v, ",") - } - } - - name := "load build definition from " + filename - - filenames := []string{filename, filename + ".dockerignore"} - - // dockerfile is also supported casing moby/moby#10858 - if path.Base(filename) == defaultDockerfileName { - filenames = append(filenames, path.Join(path.Dir(filename), strings.ToLower(defaultDockerfileName))) - } - - src := llb.Local(localNameDockerfile, - llb.FollowPaths(filenames), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint(localNameDockerfile), - dockerfile2llb.WithInternalName(name), - llb.Differ(llb.DiffNone, false), - ) - - var buildContext *llb.State - isNotLocalContext := false - keepGit := false - if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { - keepGit = v - } - if st, ok := detectGitContext(opts[localNameContext], keepGit); ok { - if !forceLocalDockerfile { - src = *st - } - buildContext = st - } else if httpPrefix.MatchString(opts[localNameContext]) { - httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context")) - def, err := httpContext.Marshal(ctx, marshalOpts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to marshal httpcontext") - } - res, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to resolve httpcontext") - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - dt, err := ref.ReadFile(ctx, client.ReadRequest{ - Filename: "context", - Range: &client.FileRange{ - Length: 1024, - }, - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to read downloaded context") - } - if isArchive(dt) { - bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ - AttemptUnpack: true, - })) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } else { - filename = "context" - if !forceLocalDockerfile { - src = httpContext - } - buildContext = &httpContext - isNotLocalContext = true - } - } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, errors.Wrapf(err, "failed to get frontend inputs") - } - - if !forceLocalDockerfile { - inputDockerfile, ok := inputs[DefaultLocalNameDockerfile] - if ok { - src = inputDockerfile - } - } - - inputCtx, ok := inputs[DefaultLocalNameContext] - if ok { - buildContext = &inputCtx - isNotLocalContext = true - } - } - - if buildContext != nil { - if sub, ok := opts[keyContextSubDir]; ok { - buildContext = scopeToSubDir(buildContext, sub) - } - } - - def, err := src.Marshal(ctx, marshalOpts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal local source") - } - - defVtx, err := def.Head() - if err != nil { - return nil, err - } - - var sourceMap *llb.SourceMap - - eg, ctx2 := errgroup.WithContext(ctx) - var dtDockerfile []byte - var dtDockerignore []byte - var dtDockerignoreDefault []byte - eg.Go(func() error { - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return errors.Wrapf(err, "failed to resolve dockerfile") - } - - ref, err := res.SingleRef() - if err != nil { - return err - } - - dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename, - }) - if err != nil { - fallback := false - if path.Base(filename) == defaultDockerfileName { - var err1 error - dtDockerfile, err1 = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: path.Join(path.Dir(filename), strings.ToLower(defaultDockerfileName)), - }) - if err1 == nil { - fallback = true - } - } - if !fallback { - return errors.Wrapf(err, "failed to read dockerfile") - } - } - - sourceMap = llb.NewSourceMap(&src, filename, dtDockerfile) - sourceMap.Definition = def - - dt, err := ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename + ".dockerignore", - }) - if err == nil { - dtDockerignore = dt - } - return nil - }) - var excludes []string - if !isNotLocalContext { - eg.Go(func() error { - dockerignoreState := buildContext - if dockerignoreState == nil { - st := llb.Local(localNameContext, - llb.SessionID(c.BuildOpts().SessionID), - llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint(localNameContext+"-"+dockerignoreFilename), - dockerfile2llb.WithInternalName("load "+dockerignoreFilename), - llb.Differ(llb.DiffNone, false), - ) - dockerignoreState = &st - } - def, err := dockerignoreState.Marshal(ctx, marshalOpts...) 
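The hand-rolled context, dockerignore, and Dockerfile plumbing being deleted in this hunk now lives behind the shared dockerui client used at the top of the new Build. A minimal sketch of that flow, assuming the dockerui API as vendored in this patch; the Dockerfile-to-LLB conversion step is stubbed with llb.Scratch to keep the sketch short.

package example

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/exporter/containerimage/image"
	"github.com/moby/buildkit/frontend/dockerui"
	"github.com/moby/buildkit/frontend/gateway/client"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// build is a stripped-down sketch of the new frontend flow: dockerui.Client
// owns option parsing, context/dockerignore handling, and per-platform result
// assembly that build.go previously did by hand.
func build(ctx context.Context, c client.Client) (*client.Result, error) {
	bc, err := dockerui.NewClient(c)
	if err != nil {
		return nil, err
	}

	// ReadEntrypoint replaces the hand-rolled "load build definition" solve.
	src, err := bc.ReadEntrypoint(ctx, "Dockerfile")
	if err != nil {
		return nil, err
	}
	_ = src.Data // the Dockerfile bytes, previously dtDockerfile

	rb, err := bc.Build(ctx, func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) {
		// A real frontend converts src.Data to LLB here; Scratch keeps the sketch small.
		def, err := llb.Scratch().Marshal(ctx)
		if err != nil {
			return nil, nil, err
		}
		res, err := c.Solve(ctx, client.SolveRequest{Definition: def.ToPB()})
		if err != nil {
			return nil, nil, err
		}
		ref, err := res.SingleRef()
		if err != nil {
			return nil, nil, err
		}
		return ref, &image.Image{}, nil
	})
	if err != nil {
		return nil, err
	}
	return rb.Finalize()
}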
- if err != nil { - return err - } - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return err - } - ref, err := res.SingleRef() - if err != nil { - return err - } - dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: dockerignoreFilename, - }) - if err != nil { - return nil - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if dtDockerignore == nil { - dtDockerignore = dtDockerignoreDefault - } - if dtDockerignore != nil { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse dockerignore") - } - } - if _, ok := opts["cmdline"]; !ok { if cmdline, ok := opts[keySyntaxArg]; ok { p := strings.SplitN(strings.TrimSpace(cmdline), " ", 2) @@ -387,10 +54,10 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline) } return res, err - } else if ref, cmdline, loc, ok := parser.DetectSyntax(dtDockerfile); ok { + } else if ref, cmdline, loc, ok := parser.DetectSyntax(src.Data); ok { res, err := forwardGateway(ctx, c, ref, cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { - return nil, wrapSource(err, sourceMap, loc) + return nil, wrapSource(err, src.SourceMap, loc) } return res, err } @@ -400,231 +67,111 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return nil, capsError } - if res, ok, err := checkSubRequest(ctx, opts); ok { - return res, err - } - - exportMap := len(targetPlatforms) > 1 - - if v := opts[keyMultiPlatformArg]; v != "" { - opts[keyMultiPlatform] = v - } - if v := opts[keyMultiPlatform]; v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Errorf("invalid boolean value %s", v) - } - if !b && exportMap { - return nil, errors.Errorf("returning multiple target platforms is not allowed") - } - exportMap = b - } - - expPlatforms := &exptypes.Platforms{ - Platforms: make([]exptypes.Platform, len(targetPlatforms)), - } - res := client.NewResult() - - if v, ok := opts[keyHostnameArg]; ok && len(v) > 0 { - opts[keyHostname] = v - } - - epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) - if err != nil { - return nil, err - } - - target := opts[keyTarget] convertOpt := dockerfile2llb.ConvertOpt{ - Target: target, - MetaResolver: c, - BuildArgs: filter(opts, buildArgPrefix), - Labels: filter(opts, labelPrefix), - CacheIDNamespace: opts[keyCacheNSArg], - SessionID: c.BuildOpts().SessionID, - BuildContext: buildContext, - Excludes: excludes, - IgnoreCache: ignoreCache, - TargetPlatform: targetPlatforms[0], - BuildPlatforms: buildPlatforms, - ImageResolveMode: resolveMode, - PrefixPlatform: exportMap, - ExtraHosts: extraHosts, - ShmSize: shmSize, - Ulimit: ulimit, - CgroupParent: opts[keyCgroupParent], - ForceNetMode: defaultNetMode, - LLBCaps: &caps, - SourceMap: sourceMap, - Hostname: opts[keyHostname], - SourceDateEpoch: epoch, + Config: bc.Config, + Client: bc, + SourceMap: src.SourceMap, + MetaResolver: c, Warn: func(msg, url string, detail [][]byte, location *parser.Range) { - c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url)) + src.Warn(ctx, msg, warnOpts(location, detail, url)) }, - ContextByName: contextByNameFunc(c, c.BuildOpts().SessionID), + } + + if res, ok, err := bc.HandleSubrequest(ctx, dockerui.RequestHandler{ + Outline: func(ctx context.Context) (*outline.Outline, error) { + return 
dockerfile2llb.Dockefile2Outline(ctx, src.Data, convertOpt) + }, + ListTargets: func(ctx context.Context) (*targets.List, error) { + return dockerfile2llb.ListTargets(ctx, src.Data) + }, + }); err != nil { + return nil, err + } else if ok { + return res, nil } defer func() { var el *parser.ErrorLocation if errors.As(err, &el) { - err = wrapSource(err, sourceMap, el.Location) + err = wrapSource(err, src.SourceMap, el.Location) } }() - if req, ok := opts[keyRequestID]; ok { - switch req { - case outline.SubrequestsOutlineDefinition.Name: - o, err := dockerfile2llb.Dockefile2Outline(ctx, dtDockerfile, convertOpt) - if err != nil { - return nil, err - } - return o.ToResult() - case targets.SubrequestsTargetsDefinition.Name: - targets, err := dockerfile2llb.ListTargets(ctx, dtDockerfile) - if err != nil { - return nil, err - } - return targets.ToResult() - default: - return nil, errdefs.NewUnsupportedSubrequestError(req) - } - } - var scanner sbom.Scanner - attests, err := attestations.Parse(opts) - if err != nil { - return nil, err - } - if attrs, ok := attests[attestations.KeyTypeSbom]; ok { - src, ok := attrs["generator"] - if !ok { - return nil, errors.Errorf("sbom scanner cannot be empty") - } - ref, err := reference.ParseNormalizedNamed(src) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse sbom scanner %s", src) - } - ref = reference.TagNameOnly(ref) - - scanner, err = sbom.CreateSBOMScanner(ctx, c, ref.String()) + if bc.SBOM != nil { + scanner, err = sbom.CreateSBOMScanner(ctx, c, bc.SBOM.Generator, llb.ResolveImageConfigOpt{ + ResolveMode: opts["image-resolve-mode"], + }) if err != nil { return nil, err } } - scanTargets := make([]*dockerfile2llb.SBOMTargets, len(targetPlatforms)) - eg, ctx2 = errgroup.WithContext(ctx) + scanTargets := sync.Map{} - for i, tp := range targetPlatforms { - func(i int, tp *ocispecs.Platform) { - eg.Go(func() (err error) { - opt := convertOpt - opt.TargetPlatform = tp - if i != 0 { - opt.Warn = nil - } - opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID) - st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt) - if err != nil { - return err - } + rb, err := bc.Build(ctx, func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) { + opt := convertOpt + opt.TargetPlatform = platform + if idx != 0 { + opt.Warn = nil + } - def, err := st.Marshal(ctx2) - if err != nil { - return errors.Wrapf(err, "failed to marshal LLB definition") - } + st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx, src.Data, opt) + if err != nil { + return nil, nil, err + } - config, err := json.Marshal(img) - if err != nil { - return errors.Wrapf(err, "failed to marshal image config") - } + def, err := st.Marshal(ctx) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to marshal LLB definition") + } - var cacheImports []client.CacheOptionsEntry - // new API - if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { - var cacheImportsUM []controlapi.CacheOptionsEntry - if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { - return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) - } - for _, um := range cacheImportsUM { - cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) - } - } - // old API - if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { - cacheFrom := strings.Split(cacheFromStr, ",") - for _, s := range cacheFrom { - im := 
client.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": s, - }, - } - // FIXME(AkihiroSuda): skip append if already exists - cacheImports = append(cacheImports, im) - } - } + r, err := c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + CacheImports: bc.CacheImports, + }) + if err != nil { + return nil, nil, err + } - r, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - CacheImports: cacheImports, - }) - if err != nil { - return err - } + ref, err := r.SingleRef() + if err != nil { + return nil, nil, err + } - ref, err := r.SingleRef() - if err != nil { - return err - } + p := platforms.DefaultSpec() + if platform != nil { + p = *platform + } + scanTargets.Store(platforms.Format(platforms.Normalize(p)), scanTarget) - p := platforms.DefaultSpec() - if tp != nil { - p = *tp - } - p = platforms.Normalize(p) - k := platforms.Format(p) - - if !exportMap { - res.AddMeta(exptypes.ExporterImageConfigKey, config) - res.SetRef(ref) - - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } else { - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddRef(k, ref) - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } - scanTargets[i] = scanTarget - return nil - }) - }(i, tp) - } - - if err := eg.Wait(); err != nil { + return ref, img, nil + }) + if err != nil { return nil, err } if scanner != nil { - for i, p := range expPlatforms.Platforms { - target := scanTargets[i] + if err := rb.EachPlatform(ctx, func(ctx context.Context, id string, p ocispecs.Platform) error { + v, ok := scanTargets.Load(id) + if !ok { + return errors.Errorf("no scan targets for %s", id) + } + target, ok := v.(*dockerfile2llb.SBOMTargets) + if !ok { + return errors.Errorf("invalid scan targets for %T", v) + } var opts []llb.ConstraintsOpt if target.IgnoreCache { opts = append(opts, llb.IgnoreCache) } - att, err := scanner(ctx, p.ID, target.Core, target.Extras, opts...) + att, err := scanner(ctx, id, target.Core, target.Extras, opts...) 
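For illustration, a minimal sketch of how the per-platform scan targets above are keyed and read back, assuming only the containerd platforms helpers and a sync.Map; the stored value stands in for the *dockerfile2llb.SBOMTargets used in the real code.

package example

import (
	"sync"

	"github.com/containerd/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// scanKey derives the map key the same way the build callback above does:
// nil means "default build platform", and the key is the normalized,
// formatted platform string (e.g. "linux/amd64").
func scanKey(platform *ocispecs.Platform) string {
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	return platforms.Format(platforms.Normalize(p))
}

// storeAndLoad shows the round trip: Build stores one entry per platform and
// EachPlatform later loads it back by the same key.
func storeAndLoad(scanTargets *sync.Map, platform *ocispecs.Platform, target any) (any, bool) {
	id := scanKey(platform)
	scanTargets.Store(id, target) // the real code stores a *dockerfile2llb.SBOMTargets
	return scanTargets.Load(id)
}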
if err != nil { - return nil, err + return err } - attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (client.Reference, error) { + attSolve, err := result.ConvertAttestation(&att, func(st *llb.State) (client.Reference, error) { def, err := st.Marshal(ctx) if err != nil { return nil, err @@ -638,19 +185,16 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return r.Ref, nil }) if err != nil { - return nil, err + return err } - res.AddAttestation(p.ID, *attSolve) + rb.AddAttestation(id, *attSolve) + return nil + }); err != nil { + return nil, err } } - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err - } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) - - return res, nil + return rb.Finalize() } func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) { @@ -686,173 +230,11 @@ func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline st }) } -func filter(opt map[string]string, key string) map[string]string { - m := map[string]string{} - for k, v := range opt { - if strings.HasPrefix(k, key) { - m[strings.TrimPrefix(k, key)] = v - } - } - return m -} - -func detectGitContext(ref string, keepGit bool) (*llb.State, bool) { - g, err := gitutil.ParseGitRef(ref) - if err != nil { - return nil, false - } - commit := g.Commit - if g.SubDir != "" { - commit += ":" + g.SubDir - } - gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} - if keepGit { - gitOpts = append(gitOpts, llb.KeepGitDir()) - } - - st := llb.Git(g.Remote, commit, gitOpts...) - return &st, true -} - -func isArchive(header []byte) bool { - for _, m := range [][]byte{ - {0x42, 0x5A, 0x68}, // bzip2 - {0x1F, 0x8B, 0x08}, // gzip - {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz - } { - if len(header) < len(m) { - continue - } - if bytes.Equal(m, header[:len(m)]) { - return true - } - } - - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func parsePlatforms(v string) ([]*ocispecs.Platform, error) { - var pp []*ocispecs.Platform - for _, v := range strings.Split(v, ",") { - p, err := platforms.Parse(v) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse target platform %s", v) - } - p = platforms.Normalize(p) - pp = append(pp, &p) - } - return pp, nil -} - -func parseResolveMode(v string) (llb.ResolveMode, error) { - switch v { - case pb.AttrImageResolveModeDefault, "": - return llb.ResolveModeDefault, nil - case pb.AttrImageResolveModeForcePull: - return llb.ResolveModeForcePull, nil - case pb.AttrImageResolveModePreferLocal: - return llb.ResolveModePreferLocal, nil - default: - return 0, errors.Errorf("invalid image-resolve-mode: %s", v) - } -} - -func parseExtraHosts(v string) ([]llb.HostIP, error) { - if v == "" { - return nil, nil - } - out := make([]llb.HostIP, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid key-value pair %s", field) - } - key := strings.ToLower(parts[0]) - val := strings.ToLower(parts[1]) - ip := net.ParseIP(val) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", val) - } - out = append(out, llb.HostIP{Host: key, IP: ip}) - } - return out, nil -} - -func parseShmSize(v string) (int64, error) { - if len(v) == 0 { - return 0, nil - } - kb, err := 
strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return kb, nil -} - -func parseUlimits(v string) ([]pb.Ulimit, error) { - if v == "" { - return nil, nil - } - out := make([]pb.Ulimit, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - ulimit, err := units.ParseUlimit(field) - if err != nil { - return nil, err - } - out = append(out, pb.Ulimit{ - Name: ulimit.Name, - Soft: ulimit.Soft, - Hard: ulimit.Hard, - }) - } - return out, nil -} - -func parseNetMode(v string) (pb.NetMode, error) { - if v == "" { - return llb.NetModeSandbox, nil - } - switch v { - case "none": - return llb.NetModeNone, nil - case "host": - return llb.NetModeHost, nil - case "sandbox": - return llb.NetModeSandbox, nil - default: - return 0, errors.Errorf("invalid netmode %s", v) - } -} - -func scopeToSubDir(c *llb.State, dir string) *llb.State { - bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ - CopyDirContentsOnly: true, - })) - return &bc -} - -func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) client.WarnOpts { +func warnOpts(r *parser.Range, detail [][]byte, url string) client.WarnOpts { opts := client.WarnOpts{Level: 1, Detail: detail, URL: url} if r == nil { return opts } - opts.SourceInfo = &pb.SourceInfo{ - Data: sm.Data, - Filename: sm.Filename, - Definition: sm.Definition.ToPB(), - } opts.Range = []*pb.Range{{ Start: pb.Position{ Line: int32(r.Start.Line), @@ -866,238 +248,6 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c return opts } -func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { - return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, nil, errors.Wrapf(err, "invalid context name %s", name) - } - name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") - - if p == nil { - pp := platforms.Normalize(platforms.DefaultSpec()) - p = &pp - } - if p != nil { - pname := name + "::" + platforms.Format(platforms.Normalize(*p)) - st, img, err := contextByName(ctx, c, sessionID, name, pname, p, resolveMode) - if err != nil { - return nil, nil, err - } - if st != nil { - return st, img, nil - } - } - return contextByName(ctx, c, sessionID, name, name, p, resolveMode) - } -} - -func contextByName(ctx context.Context, c client.Client, sessionID, name string, pname string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) { - opts := c.BuildOpts().Opts - v, ok := opts[contextPrefix+pname] - if !ok { - return nil, nil, nil - } - - vv := strings.SplitN(v, ":", 2) - if len(vv) != 2 { - return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, pname) - } - // allow git@ without protocol for SSH URLs for backwards compatibility - if strings.HasPrefix(vv[0], "git@") { - vv[0] = "git" - } - switch vv[0] { - case "docker-image": - ref := strings.TrimPrefix(vv[1], "//") - if ref == "scratch" { - st := llb.Scratch() - return &st, nil, nil - } - - imgOpt := []llb.ImageOption{ - llb.WithCustomName("[context " + pname + "] " + ref), - } - if platform != nil { - imgOpt = append(imgOpt, llb.Platform(*platform)) - } - - named, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return 
nil, nil, err - } - - named = reference.TagNameOnly(named) - - _, data, err := c.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: resolveMode, - LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, ref), - ResolverType: llb.ResolverTypeRegistry, - }) - if err != nil { - return nil, nil, err - } - - var img dockerfile2llb.Image - if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, err - } - img.Created = nil - - st := llb.Image(ref, imgOpt...) - st, err = st.WithImageConfig(data) - if err != nil { - return nil, nil, err - } - return &st, &img, nil - case "git": - st, ok := detectGitContext(v, true) - if !ok { - return nil, nil, errors.Errorf("invalid git context %s", v) - } - return st, nil, nil - case "http", "https": - st, ok := detectGitContext(v, true) - if !ok { - httpst := llb.HTTP(v, llb.WithCustomName("[context "+pname+"] "+v)) - st = &httpst - } - return st, nil, nil - case "oci-layout": - refSpec := strings.TrimPrefix(vv[1], "//") - ref, err := reference.Parse(refSpec) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) - } - named, ok := ref.(reference.Named) - if !ok { - return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) - } - dgstd, ok := named.(reference.Digested) - if !ok { - return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) - } - - // for the dummy ref primarily used in log messages, we can use the - // original name, since the store key may not be significant - dummyRef, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) - } - dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) - } - - _, data, err := c.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: resolveMode, - LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, dummyRef.String()), - ResolverType: llb.ResolverTypeOCILayout, - Store: llb.ResolveImageConfigOptStore{ - SessionID: sessionID, - StoreID: named.Name(), - }, - }) - if err != nil { - return nil, nil, err - } - - var img dockerfile2llb.Image - if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") - } - - ociOpt := []llb.OCILayoutOption{ - llb.WithCustomName("[context " + pname + "] OCI load from client"), - llb.OCIStore(c.BuildOpts().SessionID, named.Name()), - } - if platform != nil { - ociOpt = append(ociOpt, llb.Platform(*platform)) - } - st := llb.OCILayout( - dummyRef.String(), - ociOpt..., - ) - st, err = st.WithImageConfig(data) - if err != nil { - return nil, nil, err - } - return &st, &img, nil - case "local": - st := llb.Local(vv[1], - llb.SessionID(c.BuildOpts().SessionID), - llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint("context:"+pname+"-"+dockerignoreFilename), - llb.WithCustomName("[context "+pname+"] load "+dockerignoreFilename), - llb.Differ(llb.DiffNone, false), - ) - def, err := st.Marshal(ctx) - if err != nil { - return nil, nil, err - } - res, err := c.Solve(ctx, client.SolveRequest{ - Evaluate: true, - Definition: def.ToPB(), - }) - if err != nil { - return nil, nil, err - } - ref, err := res.SingleRef() - if err != nil { - return nil, nil, err - } - dt, _ := 
ref.ReadFile(ctx, client.ReadRequest{ - Filename: dockerignoreFilename, - }) // error ignored - var excludes []string - if len(dt) != 0 { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) - if err != nil { - return nil, nil, err - } - } - st = llb.Local(vv[1], - llb.WithCustomName("[context "+pname+"] load from client"), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint("context:"+pname), - llb.ExcludePatterns(excludes), - ) - return &st, nil, nil - case "input": - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, nil, err - } - st, ok := inputs[vv[1]] - if !ok { - return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], pname) - } - md, ok := opts[inputMetadataPrefix+vv[1]] - if ok { - m := make(map[string][]byte) - if err := json.Unmarshal([]byte(md), &m); err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) - } - var img *dockerfile2llb.Image - if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { - st, err = st.WithImageConfig(dtic) - if err != nil { - return nil, nil, err - } - if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", pname) - } - } - return &st, img, nil - } - return &st, nil, nil - default: - return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], pname) - } -} - func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { if sm == nil { return err @@ -1106,6 +256,7 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { Info: &pb.SourceInfo{ Data: sm.Data, Filename: sm.Filename, + Language: sm.Language, Definition: sm.Definition.ToPB(), }, Ranges: make([]*pb.Range, 0, len(ranges)), @@ -1124,15 +275,3 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { } return errdefs.WithSource(err, s) } - -func parseSourceDateEpoch(v string) (*time.Time, error) { - if v == "" { - return nil, nil - } - sde, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) - } - tm := time.Unix(sde, 0).UTC() - return &tm, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go deleted file mode 100644 index 8449530238..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go +++ /dev/null @@ -1,54 +0,0 @@ -package builder - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/subrequests" - "github.com/moby/buildkit/frontend/subrequests/outline" - "github.com/moby/buildkit/frontend/subrequests/targets" - "github.com/moby/buildkit/solver/errdefs" -) - -func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) { - req, ok := opts[keyRequestID] - if !ok { - return nil, false, nil - } - switch req { - case subrequests.RequestSubrequestsDescribe: - res, err := describe() - return res, true, err - case outline.RequestSubrequestsOutline, targets.RequestTargets: // handled later - return nil, false, nil - default: - return nil, true, errdefs.NewUnsupportedSubrequestError(req) - } -} - -func describe() (*client.Result, error) { - all := []subrequests.Request{ - outline.SubrequestsOutlineDefinition, - targets.SubrequestsTargetsDefinition, - subrequests.SubrequestsDescribeDefinition, - } - dt, err := json.MarshalIndent(all, "", " 
") - if err != nil { - return nil, err - } - - b := bytes.NewBuffer(nil) - if err := subrequests.PrintDescribe(dt, b); err != nil { - return nil, err - } - - res := client.NewResult() - res.Metadata = map[string][]byte{ - "result.json": dt, - "result.txt": b.Bytes(), - "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), - } - return res, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index 6476267e2d..738ebf7d05 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -24,12 +24,12 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/frontend/dockerui" "github.com/moby/buildkit/frontend/subrequests/outline" "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/system" @@ -41,9 +41,8 @@ import ( ) const ( - emptyImageName = "scratch" - defaultContextLocalName = "context" - historyComment = "buildkit.dockerfile.v0" + emptyImageName = "scratch" + historyComment = "buildkit.dockerfile.v0" sbomScanContext = "BUILDKIT_SBOM_SCAN_CONTEXT" sbomScanStage = "BUILDKIT_SBOM_SCAN_STAGE" @@ -55,34 +54,13 @@ var nonEnvArgs = map[string]struct{}{ } type ConvertOpt struct { - Target string - MetaResolver llb.ImageMetaResolver - BuildArgs map[string]string - Labels map[string]string - SessionID string - BuildContext *llb.State - Excludes []string - // IgnoreCache contains names of the stages that should not use build cache. - // Empty slice means ignore cache for all stages. Nil doesn't disable cache. 
- IgnoreCache []string - // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *ocispecs.Platform - BuildPlatforms []ocispecs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ShmSize int64 - Ulimit []pb.Ulimit - CgroupParent string - ForceNetMode pb.NetMode - LLBCaps *apicaps.CapSet - ContextLocalName string - SourceMap *llb.SourceMap - Hostname string - SourceDateEpoch *time.Time - Warn func(short, url string, detail [][]byte, location *parser.Range) - ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) + dockerui.Config + Client *dockerui.Client + SourceMap *llb.SourceMap + TargetPlatform *ocispecs.Platform + MetaResolver llb.ImageMetaResolver + LLBCaps *apicaps.CapSet + Warn func(short, url string, detail [][]byte, location *parser.Range) } type SBOMTargets struct { @@ -92,7 +70,7 @@ type SBOMTargets struct { IgnoreCache bool } -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *SBOMTargets, error) { +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *image.Image, *SBOMTargets, error) { ds, err := toDispatchState(ctx, dt, opt) if err != nil { return nil, nil, nil, err @@ -158,35 +136,36 @@ func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) { } func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, error) { - contextByName := opt.ContextByName - opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) { - if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { - if contextByName != nil { - if p == nil { - p = opt.TargetPlatform - } - st, img, err := contextByName(ctx, name, resolveMode, p) - if err != nil { - return nil, nil, err - } - return st, img, nil - } - } - return nil, nil, nil - } - if len(dt) == 0 { return nil, errors.Errorf("the Dockerfile cannot be empty") } - if opt.ContextLocalName == "" { - opt.ContextLocalName = defaultContextLocalName + namedContext := func(ctx context.Context, name string, copt dockerui.ContextOpt) (*llb.State, *image.Image, error) { + if opt.Client == nil { + return nil, nil, nil + } + if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { + if copt.Platform == nil { + copt.Platform = opt.TargetPlatform + } + st, img, err := opt.Client.NamedContext(ctx, name, copt) + if err != nil { + return nil, nil, err + } + return st, img, nil + } + return nil, nil, nil } if opt.Warn == nil { opt.Warn = func(string, string, [][]byte, *parser.Range) {} } + if opt.Client != nil && opt.LLBCaps == nil { + caps := opt.Client.BuildOpts().LLBCaps + opt.LLBCaps = &caps + } + platformOpt := buildPlatformOpt(&opt) optMetaArgs := getPlatformArgs(platformOpt) @@ -254,9 +233,9 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS deps: make(map[*dispatchState]struct{}), ctxPaths: make(map[string]struct{}), stageName: st.Name, - prefixPlatform: opt.PrefixPlatform, + prefixPlatform: opt.MultiPlatformRequested, outline: outline.clone(), - epoch: opt.SourceDateEpoch, + epoch: opt.Epoch, } if v := st.Platform; v != "" { @@ -276,7 +255,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if st.Name != "" { - s, img, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) + s, img, err := namedContext(ctx, 
st.Name, dockerui.ContextOpt{Platform: ds.platform, ResolveMode: opt.ImageResolveMode.String()}) if err != nil { return nil, err } @@ -284,7 +263,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS ds.noinit = true ds.state = *s if img != nil { - ds.image = clampTimes(*img, opt.SourceDateEpoch) + ds.image = clampTimes(*img, opt.Epoch) if img.Architecture != "" && img.OS != "" { ds.platform = &ocispecs.Platform{ OS: img.OS, @@ -321,17 +300,8 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } ds.cmdTotal = total - - if opt.IgnoreCache != nil { - if len(opt.IgnoreCache) == 0 { - ds.ignoreCache = true - } else if st.Name != "" { - for _, n := range opt.IgnoreCache { - if strings.EqualFold(n, st.Name) { - ds.ignoreCache = true - } - } - } + if opt.Client != nil { + ds.ignoreCache = opt.Client.IsNoCache(st.Name) } } @@ -381,6 +351,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } + baseCtx := ctx eg, ctx := errgroup.WithContext(ctx) for i, d := range allDispatchStates.states { reachable := isReachable(target, d) @@ -389,6 +360,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS if d.stage.BaseName == emptyImageName { d.state = llb.Scratch() d.image = emptyImage(platformOpt.targetPlatform) + d.platform = &platformOpt.targetPlatform continue } func(i int, d *dispatchState) { @@ -410,7 +382,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform) + st, img, err := namedContext(ctx, d.stage.BaseName, dockerui.ContextOpt{ResolveMode: opt.ImageResolveMode.String(), Platform: platform}) if err != nil { return err } @@ -426,20 +398,28 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if reachable { prefix := "[" - if opt.PrefixPlatform && platform != nil { + if opt.MultiPlatformRequested && platform != nil { prefix += platforms.Format(*platform) + " " } prefix += "internal]" - dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), - ResolverType: llb.ResolverTypeRegistry, + mutRef, dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: opt.ImageResolveMode.String(), + LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + ResolverType: llb.ResolverTypeRegistry, + SourcePolicies: nil, }) if err != nil { return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) } - var img Image + + if ref.String() != mutRef { + ref, err = reference.ParseNormalizedNamed(mutRef) + if err != nil { + return errors.Wrapf(err, "failed to parse ref %q", mutRef) + } + } + var img image.Image if err := json.Unmarshal(dt, &img); err != nil { return errors.Wrap(err, "failed to parse image config") } @@ -466,16 +446,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } } - if !isScratch { - // if image not scratch set original image name as ref - // and actual reference as alias in binfotypes.Source - d.buildInfo.Sources = append(d.buildInfo.Sources, binfotypes.Source{ - Type: 
binfotypes.SourceTypeDockerImage, - Ref: origName, - Alias: ref.String(), - Pin: dgst.String(), - }) - } d.image = img } if isScratch { @@ -485,7 +455,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, - llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform, nil)), + llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.MultiPlatformRequested, platform, nil)), location(opt.SourceMap, d.stage.Location), ) } @@ -500,6 +470,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS return nil, err } + ctx = baseCtx buildContext := &mutableOutput{} ctxPaths := map[string]struct{}{} @@ -516,11 +487,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - var pathOS string - if d.platform != nil { - pathOS = d.platform.OS - } - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS)) + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(d.platform.OS)) } // initialize base metadata from image conf @@ -541,14 +508,12 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS return nil, parser.WithLocation(err, d.stage.Location) } } - d.state = d.state.Network(opt.ForceNetMode) - + d.state = d.state.Network(opt.NetworkMode) opt := dispatchOpt{ allDispatchStates: allDispatchStates, metaArgs: optMetaArgs, buildArgValues: opt.BuildArgs, shlex: shlex, - sessionID: opt.SessionID, buildContext: llb.NewState(buildContext), proxyEnv: proxyEnv, cacheIDNamespace: opt.CacheIDNamespace, @@ -556,7 +521,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS targetPlatform: platformOpt.targetPlatform, extraHosts: opt.ExtraHosts, shmSize: opt.ShmSize, - ulimit: opt.Ulimit, + ulimit: opt.Ulimits, cgroupParent: opt.CgroupParent, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, @@ -598,21 +563,17 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS target.image.Config.Labels[k] = v } - opts := []llb.LocalOption{ - llb.SessionID(opt.SessionID), - llb.ExcludePatterns(opt.Excludes), - llb.SharedKeyHint(opt.ContextLocalName), - WithInternalName("load build context"), - } + opts := []llb.LocalOption{} if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { opts = append(opts, llb.FollowPaths(includePatterns)) } - - bc := llb.Local(opt.ContextLocalName, opts...) - if opt.BuildContext != nil { - bc = *opt.BuildContext + if opt.Client != nil { + bctx, err := opt.Client.MainContext(ctx, opts...) 
+ if err != nil { + return nil, err + } + buildContext.Output = bctx.Output() } - buildContext.Output = bc.Output() defaults := []llb.ConstraintsOpt{ llb.Platform(platformOpt.targetPlatform), @@ -678,7 +639,6 @@ type dispatchOpt struct { metaArgs []instructions.KeyValuePairOptional buildArgValues map[string]string shlex *shell.Lex - sessionID string buildContext llb.State proxyEnv *llb.ProxyEnv cacheIDNamespace string @@ -809,7 +769,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { type dispatchState struct { opt dispatchOpt state llb.State - image Image + image image.Image platform *ocispecs.Platform stage instructions.Stage base *dispatchState @@ -825,7 +785,6 @@ type dispatchState struct { cmdIndex int cmdTotal int prefixPlatform bool - buildInfo binfotypes.BuildInfo outline outlineCapture epoch *time.Time scanStage bool @@ -936,7 +895,8 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } st := llb.Scratch().Dir(sourcePath).File( llb.Mkfile(f, 0755, []byte(data)), - WithInternalName("preparing inline document"), + dockerui.WithInternalName("preparing inline document"), + llb.Platform(*d.platform), ) mount := llb.AddMount(destPath, st, llb.SourcePath(sourcePath), llb.Readonly) @@ -1040,12 +1000,20 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { - d.state = d.state.Dir(c.Path) - wd := c.Path - if !path.IsAbs(c.Path) { - wd = path.Join("/", d.image.Config.WorkingDir, wd) + wd, err := system.NormalizeWorkdir(d.image.Config.WorkingDir, c.Path, d.platform.OS) + if err != nil { + return errors.Wrap(err, "normalizing workdir") } + + // NormalizeWorkdir returns paths with platform specific separators. For Windows + // this will be of the form: \some\path, which is needed later when we pass it to + // HCS. d.image.Config.WorkingDir = wd + + // From this point forward, we can use UNIX style paths. + wd = system.ToSlash(wd, d.platform.OS) + d.state = d.state.Dir(wd) + if commit { withLayer := false if wd != "/" { @@ -1064,6 +1032,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform, env)), location(opt.sourceMap, c.Location()), + llb.Platform(*d.platform), ) withLayer = true } @@ -1073,11 +1042,11 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo } func dispatchCopy(d *dispatchState, cfg copyConfig) error { - pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) + dest, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath, *d.platform) if err != nil { return err } - dest := path.Join("/", pp) + if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { dest += string(filepath.Separator) } @@ -1101,9 +1070,6 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { if !cfg.isAddCommand { return errors.New("checksum can't be specified for COPY") } - if !addChecksumEnabled { - return errors.New("instruction 'ADD --checksum=' requires the labs channel") - } if len(cfg.params.SourcePaths) != 1 { return errors.New("checksum can't be specified for multiple sources") } @@ -1128,9 +1094,6 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { if !cfg.isAddCommand { return errors.New("source can't be a git ref for COPY") } - if !addGitEnabled { - return errors.New("instruction ADD requires the labs channel") - } // TODO: print a warning (not an error) if gitRef.UnencryptedTCP is true commit := gitRef.Commit if gitRef.SubDir != "" { @@ -1181,6 +1144,11 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { a = a.Copy(st, f, dest, opts...) } } else { + src, err = system.NormalizePath("/", src, d.platform.OS, false) + if err != nil { + return errors.Wrap(err, "removing drive letter") + } + opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, FollowSymlinks: true, @@ -1192,9 +1160,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { }}, copyOpt...) if a == nil { - a = llb.Copy(cfg.source, filepath.Join("/", src), dest, opts...) + a = llb.Copy(cfg.source, src, dest, opts...) } else { - a = a.Copy(cfg.source, filepath.Join("/", src), dest, opts...) + a = a.Copy(cfg.source, src, dest, opts...) } } } @@ -1203,10 +1171,14 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { commitMessage.WriteString(" <<" + src.Path) data := src.Data - f := src.Path + f, err := system.CheckSystemDriveAndRemoveDriveLetter(src.Path, d.platform.OS) + if err != nil { + return errors.Wrap(err, "removing drive letter") + } st := llb.Scratch().File( - llb.Mkfile(f, 0664, []byte(data)), - WithInternalName("preparing inline document"), + llb.Mkfile(f, 0644, []byte(data)), + dockerui.WithInternalName("preparing inline document"), + llb.Platform(*d.platform), ) opts := append([]llb.CopyOption{&llb.CopyInfo{ @@ -1215,9 +1187,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { }}, copyOpt...) if a == nil { - a = llb.Copy(st, f, dest, opts...) + a = llb.Copy(st, system.ToSlash(f, d.platform.OS), dest, opts...) } else { - a = a.Copy(st, f, dest, opts...) + a = a.Copy(st, filepath.ToSlash(f), dest, opts...) } } @@ -1248,7 +1220,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { d.cmdIndex-- // prefixCommand increases it pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env) - var copyOpts []llb.ConstraintsOpt + copyOpts := []llb.ConstraintsOpt{ + llb.Platform(*d.platform), + } copy(copyOpts, fileOpt) copyOpts = append(copyOpts, llb.ProgressGroup(pgID, pgName, true)) @@ -1307,7 +1281,7 @@ func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { args = withShell(d.image, args) } d.image.Config.Cmd = args - d.image.Config.ArgsEscaped = true + d.image.Config.ArgsEscaped = true //nolint:staticcheck // ignore SA1019: field is deprecated in OCI Image spec, but used for backward-compatibility with Docker image spec. 
d.cmdSet = true return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil, d.epoch) } @@ -1326,11 +1300,12 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { d.image.Config.Healthcheck = &image.HealthConfig{ - Test: c.Health.Test, - Interval: c.Health.Interval, - Timeout: c.Health.Timeout, - StartPeriod: c.Health.StartPeriod, - Retries: c.Health.Retries, + Test: c.Health.Test, + Interval: c.Health.Interval, + Timeout: c.Health.Timeout, + StartPeriod: c.Health.StartPeriod, + StartInterval: c.Health.StartInterval, + Retries: c.Health.Retries, } return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch) } @@ -1440,15 +1415,24 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil, d.epoch) } -func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { - if path.IsAbs(p) { - return p, nil - } - dir, err := s.GetDir(context.TODO()) +func pathRelativeToWorkingDir(s llb.State, p string, platform ocispecs.Platform) (string, error) { + dir, err := s.GetDir(context.TODO(), llb.Platform(platform)) if err != nil { return "", err } - return path.Join(dir, p), nil + + if len(p) == 0 { + return dir, nil + } + p, err = system.CheckSystemDriveAndRemoveDriveLetter(p, platform.OS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if system.IsAbs(p, platform.OS) { + return system.NormalizePath("/", p, platform.OS, false) + } + return system.NormalizePath(dir, p, platform.OS, false) } func addEnv(env []string, k, v string) []string { @@ -1514,7 +1498,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption return strings.Join(append(tmpBuildEnv, args...), " ") } -func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { +func commitToHistory(img *image.Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { if st != nil { msg += " # buildkit" } @@ -1642,7 +1626,7 @@ type mutableOutput struct { llb.Output } -func withShell(img Image, args []string) []string { +func withShell(img image.Image, args []string) []string { var shell []string if len(img.Config.Shell) > 0 { shell = append([]string{}, img.Config.Shell...) 
@@ -1652,7 +1636,7 @@ func withShell(img Image, args []string) []string { return append(shell, strings.Join(args, " ")) } -func autoDetectPlatform(img Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { +func autoDetectPlatform(img image.Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { os := img.OS arch := img.Architecture if target.OS == os && target.Architecture == arch { @@ -1666,10 +1650,6 @@ func autoDetectPlatform(img Image, target ocispecs.Platform, supported []ocispec return target } -func WithInternalName(name string) llb.ConstraintsOpt { - return llb.WithCustomName("[internal] " + name) -} - func uppercaseCmd(str string) string { p := strings.SplitN(str, " ", 2) p[0] = strings.ToUpper(p[0]) @@ -1794,7 +1774,7 @@ func commonImageNames() []string { return out } -func clampTimes(img Image, tm *time.Time) Image { +func clampTimes(img image.Image, tm *time.Time) image.Image { if tm == nil { return img } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go deleted file mode 100644 index 4506baeb8b..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build dfaddchecksum -// +build dfaddchecksum - -package dockerfile2llb - -const addChecksumEnabled = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go deleted file mode 100644 index 9ccb7a20e8..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build dfaddgit -// +build dfaddgit - -package dockerfile2llb - -const addGitEnabled = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go deleted file mode 100644 index 8de035297c..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !dfaddchecksum -// +build !dfaddchecksum - -package dockerfile2llb - -const addChecksumEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go deleted file mode 100644 index 119bb32c88..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !dfaddgit -// +build !dfaddgit - -package dockerfile2llb - -const addGitEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 1015590a0d..7485357bab 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -96,7 +96,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* } if mount.ReadOnly { mountOpts = append(mountOpts, llb.Readonly) - } else if mount.Type == instructions.MountTypeBind && 
opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOuput) == nil { + } else if mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOutput) == nil { mountOpts = append(mountOpts, llb.ForceNoOutput) } if mount.Type == instructions.MountTypeCache { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go index 5c3bdeec32..70d81262bc 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -6,11 +6,7 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image image.Image - -func clone(src Image) Image { +func clone(src image.Image) image.Image { img := src img.Config = src.Config img.Config.Env = append([]string{}, src.Config.Env...) @@ -19,8 +15,8 @@ func clone(src Image) Image { return img } -func emptyImage(platform ocispecs.Platform) Image { - img := Image{} +func emptyImage(platform ocispecs.Platform) image.Image { + img := image.Image{} img.Architecture = platform.Architecture img.OS = platform.OS img.Variant = platform.Variant diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index a527175b73..66e50d8aad 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -156,14 +156,7 @@ func (bf *BFlags) Parse() error { return nil } - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } + arg, value, hasValue := strings.Cut(arg[2:], "=") flag, ok := bf.flags[arg] if !ok { @@ -180,27 +173,27 @@ func (bf *BFlags) Parse() error { switch flag.flagType { case boolType: // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { + if hasValue && value == "" { return errors.Errorf("missing a value on flag: %s", arg) } - lower := strings.ToLower(value) - if lower == "" { + switch strings.ToLower(value) { + case "true", "": flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { + case "false": + flag.Value = "false" + default: return errors.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index e328b27bc7..34e8fcc91d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -11,13 +11,17 @@ import ( "github.com/pkg/errors" ) -const MountTypeBind = "bind" -const MountTypeCache = "cache" -const MountTypeTmpfs = "tmpfs" -const MountTypeSecret 
= "secret" -const MountTypeSSH = "ssh" +type MountType string -var allowedMountTypes = map[string]struct{}{ +const ( + MountTypeBind MountType = "bind" + MountTypeCache MountType = "cache" + MountTypeTmpfs MountType = "tmpfs" + MountTypeSecret MountType = "secret" + MountTypeSSH MountType = "ssh" +) + +var allowedMountTypes = map[MountType]struct{}{ MountTypeBind: {}, MountTypeCache: {}, MountTypeTmpfs: {}, @@ -25,11 +29,15 @@ var allowedMountTypes = map[string]struct{}{ MountTypeSSH: {}, } -const MountSharingShared = "shared" -const MountSharingPrivate = "private" -const MountSharingLocked = "locked" +type ShareMode string -var allowedSharingTypes = map[string]struct{}{ +const ( + MountSharingShared ShareMode = "shared" + MountSharingPrivate ShareMode = "private" + MountSharingLocked ShareMode = "locked" +) + +var allowedSharingModes = map[ShareMode]struct{}{ MountSharingShared: {}, MountSharingPrivate: {}, MountSharingLocked: {}, @@ -44,31 +52,18 @@ func init() { parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) } -func isValidMountType(s string) bool { - if s == "secret" { - if !isSecretMountsSupported() { - return false - } +func allShareModes() []string { + types := make([]string, 0, len(allowedSharingModes)) + for k := range allowedSharingModes { + types = append(types, string(k)) } - if s == "ssh" { - if !isSSHMountsSupported() { - return false - } - } - _, ok := allowedMountTypes[s] - return ok + return types } func allMountTypes() []string { - types := make([]string, 0, len(allowedMountTypes)+2) + types := make([]string, 0, len(allowedMountTypes)) for k := range allowedMountTypes { - types = append(types, k) - } - if isSecretMountsSupported() { - types = append(types, "secret") - } - if isSSHMountsSupported() { - types = append(types, "ssh") + types = append(types, string(k)) } return types } @@ -119,22 +114,22 @@ type mountState struct { } type Mount struct { - Type string + Type MountType From string Source string Target string ReadOnly bool SizeLimit int64 CacheID string - CacheSharing string + CacheSharing ShareMode Required bool Mode *uint64 UID *uint64 GID *uint64 } -func parseMount(value string, expander SingleWordExpander) (*Mount, error) { - csvReader := csv.NewReader(strings.NewReader(value)) +func parseMount(val string, expander SingleWordExpander) (*Mount, error) { + csvReader := csv.NewReader(strings.NewReader(val)) fields, err := csvReader.Read() if err != nil { return nil, errors.Wrap(err, "failed to parse csv mounts") @@ -145,10 +140,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto := true for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) + key, value, ok := strings.Cut(field, "=") + key = strings.ToLower(key) - if len(parts) == 1 { + if !ok { if expander == nil { continue // evaluate later } @@ -162,27 +157,24 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto = false continue case "required": - if m.Type == "secret" || m.Type == "ssh" { + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { m.Required = true continue } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } + default: + // any other option requires a value. 
+ return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) } } - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] // check for potential variable if expander != nil { - processed, err := expander(value) + value, err = expander(value) if err != nil { return nil, err } - value = processed } else if key == "from" { if matched, err := regexp.MatchString(`\$.`, value); err != nil { //nolint return nil, err @@ -196,10 +188,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { switch key { case "type": - if !isValidMountType(strings.ToLower(value)) { + v := MountType(strings.ToLower(value)) + if _, ok := allowedMountTypes[v]; !ok { return nil, suggest.WrapError(errors.Errorf("unsupported mount type %q", value), value, allMountTypes(), true) } - m.Type = strings.ToLower(value) + m.Type = v case "from": m.From = value case "source", "src": @@ -220,17 +213,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { m.ReadOnly = !rw roAuto = false case "required": - if m.Type == "secret" || m.Type == "ssh" { - v, err := strconv.ParseBool(value) + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { + m.Required, err = strconv.ParseBool(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) } - m.Required = v } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } case "size": - if m.Type == "tmpfs" { + if m.Type == MountTypeTmpfs { m.SizeLimit, err = units.RAMInBytes(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) @@ -241,10 +233,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { case "id": m.CacheID = value case "sharing": - if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { - return nil, errors.Errorf("unsupported sharing value %q", value) + v := ShareMode(strings.ToLower(value)) + if _, ok := allowedSharingModes[v]; !ok { + return nil, suggest.WrapError(errors.Errorf("unsupported sharing value %q", value), value, allShareModes(), true) } - m.CacheSharing = strings.ToLower(value) + m.CacheSharing = v case "mode": mode, err := strconv.ParseUint(value, 8, 32) if err != nil { @@ -273,16 +266,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache - if m.Mode != nil && !fileInfoAllowed { - return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) - } - - if m.UID != nil && !fileInfoAllowed { - return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) - } - - if m.GID != nil && !fileInfoAllowed { - return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + if !fileInfoAllowed { + if m.Mode != nil { + return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) + } + if m.UID != nil { + return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) + } + if m.GID != nil { + return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + } } if roAuto { @@ -293,10 +286,6 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } - if m.CacheSharing != "" && m.Type != MountTypeCache { - return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) - } - if m.Type == MountTypeSecret { if m.From != "" { return nil, errors.Errorf("secret mount should not have a 
from") @@ -312,5 +301,9 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } + if m.CacheSharing != "" && m.Type != MountTypeCache { + return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) + } + return m, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go index 142c3075b5..0ced44dae6 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go @@ -4,13 +4,15 @@ import ( "github.com/pkg/errors" ) +type NetworkMode = string + const ( - NetworkDefault = "default" - NetworkNone = "none" - NetworkHost = "host" + NetworkDefault NetworkMode = "default" + NetworkNone NetworkMode = "none" + NetworkHost NetworkMode = "host" ) -var allowedNetwork = map[string]struct{}{ +var allowedNetwork = map[NetworkMode]struct{}{ NetworkDefault: {}, NetworkNone: {}, NetworkHost: {}, @@ -51,7 +53,7 @@ func runNetworkPostHook(cmd *RunCommand, req parseRequest) error { return nil } -func GetNetwork(cmd *RunCommand) string { +func GetNetwork(cmd *RunCommand) NetworkMode { return cmd.getExternalValue(networkKey).(*networkState).networkMode } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go deleted file mode 100644 index 2b4140b72a..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSecretMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go deleted file mode 100644 index 0e4e5f38c7..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSSHMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 6c362fc6fa..5e03f84243 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -357,12 +357,13 @@ func parseFrom(req parseRequest) (*Stage, error) { }, nil } -func parseBuildStageName(args []string) (string, error) { - stageName := "" +var validStageName = regexp.MustCompile("^[a-z][a-z0-9-_.]*$") + +func parseBuildStageName(args []string) (stageName string, err error) { switch { case len(args) == 3 && strings.EqualFold(args[1], "as"): stageName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + if !validStageName.MatchString(stageName) { return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2]) } case len(args) != 1: @@ -543,6 +544,7 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { flInterval := req.flags.AddString("interval", "") flTimeout := req.flags.AddString("timeout", "") flStartPeriod := req.flags.AddString("start-period", "") + flStartInterval := 
req.flags.AddString("start-interval", "") flRetries := req.flags.AddString("retries", "") if err := req.flags.Parse(); err != nil { @@ -583,6 +585,12 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { } healthcheck.StartPeriod = startPeriod + startInterval, err := parseOptInterval(flStartInterval) + if err != nil { + return nil, err + } + healthcheck.StartInterval = startInterval + if flRetries.Value != "" { retries, err := strconv.ParseInt(flRetries.Value, 10, 32) if err != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index d6723635d4..4a6129fdc8 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -49,8 +49,7 @@ func (node *Node) Location() []Range { // Dump dumps the AST defined by `node` as a list of sexps. // Returns a string suitable for printing. func (node *Node) Dump() string { - str := "" - str += strings.ToLower(node.Value) + str := strings.ToLower(node.Value) if len(node.Flags) > 0 { str += fmt.Sprintf(" %q", node.Flags) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go index bf0887f236..f9aca5d9ef 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -4,8 +4,8 @@ package shell // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. func EqualEnvKeys(from, to string) bool { return from == to } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go index 010569bbaa..7bbed9b207 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go @@ -3,8 +3,8 @@ package shell import "strings" // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. 
func EqualEnvKeys(from, to string) bool { - return strings.ToUpper(from) == strings.ToUpper(to) + return strings.EqualFold(from, to) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index b930ab3260..80806f8ba7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -335,39 +335,23 @@ func (sw *shellWord) processDollar() (string, error) { } name := sw.processName() ch := sw.scanner.Next() + chs := string(ch) + nullIsUnset := false + switch ch { case '}': // Normal ${xx} case - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { + value, set := sw.getEnv(name) + if !set && sw.skipUnsetEnv { return fmt.Sprintf("${%s}", name), nil } return value, nil - case '?': - word, _, err := sw.processStopOn('}') - if err != nil { - if sw.scanner.Peek() == scanner.EOF { - return "", errors.New("syntax error: missing '}'") - } - return "", err - } - newValue, found := sw.getEnv(name) - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s?%s}", name, word), nil - } - message := "is not allowed to be unset" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - return newValue, nil case ':': - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... spot - modifier := sw.scanner.Next() - + nullIsUnset = true + ch = sw.scanner.Next() + chs += string(ch) + fallthrough + case '+', '-', '?': word, _, err := sw.processStopOn('}') if err != nil { if sw.scanner.Peek() == scanner.EOF { @@ -378,53 +362,44 @@ func (sw *shellWord) processDollar() (string, error) { // Grab the current value of the variable in question so we // can use it to determine what to do based on the modifier - newValue, found := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - return newValue, nil + value, set := sw.getEnv(name) + if sw.skipUnsetEnv && !set { + return fmt.Sprintf("${%s%s%s}", name, chs, word), nil + } + switch ch { case '-': - if newValue == "" { - newValue = word + if !set || (nullIsUnset && value == "") { + return word, nil } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil + return value, nil + case '+': + if !set || (nullIsUnset && value == "") { + return "", nil } - - return newValue, nil - + return word, nil case '?': - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } + if !set { message := "is not allowed to be unset" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - if newValue == "" { + if nullIsUnset && value == "" { message := "is not allowed to be empty" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - return newValue, nil - + return value, nil default: - return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } + default: + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } - return "", errors.Errorf("missing ':' in substitution") } func (sw *shellWord) processName() string { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go 
b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go new file mode 100644 index 0000000000..52ec012243 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go @@ -0,0 +1,138 @@ +package dockerui + +import ( + "encoding/csv" + "net" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/docker/go-units" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/solver/pb" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func parsePlatforms(v string) ([]ocispecs.Platform, error) { + var pp []ocispecs.Platform + for _, v := range strings.Split(v, ",") { + p, err := platforms.Parse(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse target platform %s", v) + } + pp = append(pp, platforms.Normalize(p)) + } + return pp, nil +} + +func parseResolveMode(v string) (llb.ResolveMode, error) { + switch v { + case pb.AttrImageResolveModeDefault, "": + return llb.ResolveModeDefault, nil + case pb.AttrImageResolveModeForcePull: + return llb.ResolveModeForcePull, nil + case pb.AttrImageResolveModePreferLocal: + return llb.ResolveModePreferLocal, nil + default: + return 0, errors.Errorf("invalid image-resolve-mode: %s", v) + } +} + +func parseExtraHosts(v string) ([]llb.HostIP, error) { + if v == "" { + return nil, nil + } + out := make([]llb.HostIP, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + key, val, ok := strings.Cut(strings.ToLower(field), "=") + if !ok { + return nil, errors.Errorf("invalid key-value pair %s", field) + } + ip := net.ParseIP(val) + if ip == nil { + return nil, errors.Errorf("failed to parse IP %s", val) + } + out = append(out, llb.HostIP{Host: key, IP: ip}) + } + return out, nil +} + +func parseShmSize(v string) (int64, error) { + if len(v) == 0 { + return 0, nil + } + kb, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return kb, nil +} + +func parseUlimits(v string) ([]pb.Ulimit, error) { + if v == "" { + return nil, nil + } + out := make([]pb.Ulimit, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + ulimit, err := units.ParseUlimit(field) + if err != nil { + return nil, err + } + out = append(out, pb.Ulimit{ + Name: ulimit.Name, + Soft: ulimit.Soft, + Hard: ulimit.Hard, + }) + } + return out, nil +} + +func parseNetMode(v string) (pb.NetMode, error) { + if v == "" { + return llb.NetModeSandbox, nil + } + switch v { + case "none": + return llb.NetModeNone, nil + case "host": + return llb.NetModeHost, nil + case "sandbox": + return llb.NetModeSandbox, nil + default: + return 0, errors.Errorf("invalid netmode %s", v) + } +} + +func parseSourceDateEpoch(v string) (*time.Time, error) { + if v == "" { + return nil, nil + } + sde, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) + } + tm := time.Unix(sde, 0).UTC() + return &tm, nil +} + +func filter(opt map[string]string, key string) map[string]string { + m := map[string]string{} + for k, v := range opt { + if strings.HasPrefix(k, key) { + m[strings.TrimPrefix(k, key)] = v + } + } + return m +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/build.go b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go new file mode 100644 index 0000000000..8fc9bbbff1 
--- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go @@ -0,0 +1,114 @@ +package dockerui + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" + "github.com/moby/buildkit/frontend/gateway/client" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type BuildFunc func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) + +func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, error) { + res := client.NewResult() + + targets := make([]*ocispecs.Platform, 0, len(bc.TargetPlatforms)) + for _, p := range bc.TargetPlatforms { + p := p + targets = append(targets, &p) + } + if len(targets) == 0 { + targets = append(targets, nil) + } + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(targets)), + } + + eg, ctx := errgroup.WithContext(ctx) + + for i, tp := range targets { + i, tp := i, tp + eg.Go(func() error { + ref, img, err := fn(ctx, tp, i) + if err != nil { + return err + } + + config, err := json.Marshal(img) + if err != nil { + return errors.Wrapf(err, "failed to marshal image config") + } + + p := platforms.DefaultSpec() + if tp != nil { + p = *tp + } + + // in certain conditions we allow input platform to be extended from base image + if p.OS == "windows" && img.OS == p.OS { + if p.OSVersion == "" && img.OSVersion != "" { + p.OSVersion = img.OSVersion + } + if p.OSFeatures == nil && len(img.OSFeatures) > 0 { + p.OSFeatures = img.OSFeatures + } + } + + p = platforms.Normalize(p) + k := platforms.Format(p) + + if bc.MultiPlatformRequested { + res.AddRef(k, ref) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) + } else { + res.SetRef(ref) + res.AddMeta(exptypes.ExporterImageConfigKey, config) + } + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: k, + Platform: p, + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + return &ResultBuilder{ + Result: res, + expPlatforms: expPlatforms, + }, nil +} + +type ResultBuilder struct { + *client.Result + expPlatforms *exptypes.Platforms +} + +func (rb *ResultBuilder) Finalize() (*client.Result, error) { + dt, err := json.Marshal(rb.expPlatforms) + if err != nil { + return nil, err + } + rb.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return rb.Result, nil +} + +func (rb *ResultBuilder) EachPlatform(ctx context.Context, fn func(ctx context.Context, id string, p ocispecs.Platform) error) error { + eg, ctx := errgroup.WithContext(ctx) + for _, p := range rb.expPlatforms.Platforms { + p := p + eg.Go(func() error { + return fn(ctx, p.ID, p.Platform) + }) + } + return eg.Wait() +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go new file mode 100644 index 0000000000..12ec2c6880 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go @@ -0,0 +1,496 @@ +package dockerui + +import ( + "bytes" + "context" + "encoding/json" + "path" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/image" + 
"github.com/moby/buildkit/frontend/attestations" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/flightcontrol" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + + keyTarget = "target" + keyCgroupParent = "cgroup-parent" + keyForceNetwork = "force-network-mode" + keyGlobalAddHosts = "add-hosts" + keyHostname = "hostname" + keyImageResolveMode = "image-resolve-mode" + keyMultiPlatform = "multi-platform" + keyNoCache = "no-cache" + keyShmSize = "shm-size" + keyTargetPlatform = "platform" + keyUlimit = "ulimit" + keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + + // Don't forget to update frontend documentation if you add + // a new build-arg: frontend/dockerfile/docs/reference.md + keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" + keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" + keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" + keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" + keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" +) + +type Config struct { + BuildArgs map[string]string + CacheIDNamespace string + CgroupParent string + Epoch *time.Time + ExtraHosts []llb.HostIP + Hostname string + ImageResolveMode llb.ResolveMode + Labels map[string]string + NetworkMode pb.NetMode + ShmSize int64 + Target string + Ulimits []pb.Ulimit + + CacheImports []client.CacheOptionsEntry + TargetPlatforms []ocispecs.Platform // nil means default + BuildPlatforms []ocispecs.Platform + MultiPlatformRequested bool + SBOM *SBOM +} + +type Client struct { + Config + client client.Client + ignoreCache []string + bctx *buildContext + g flightcontrol.Group[*buildContext] + bopts client.BuildOpts + + dockerignore []byte +} + +type SBOM struct { + Generator string +} + +type Source struct { + *llb.SourceMap + Warn func(context.Context, string, client.WarnOpts) +} + +type ContextOpt struct { + NoDockerignore bool + LocalOpts []llb.LocalOption + Platform *ocispecs.Platform + ResolveMode string + CaptureDigest *digest.Digest +} + +func validateMinCaps(c client.Client) error { + opts := c.BuildOpts().Opts + caps := c.BuildOpts().LLBCaps + + if err := caps.Supports(pb.CapFileBase); err != nil { + return errors.Wrap(err, "needs BuildKit 0.5 or later") + } + if opts["override-copy-image"] != "" { + return errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") + } + if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil && b { + return errors.New("support for \"BUILDKIT_DISABLE_FILEOP\" build-arg was removed in BuildKit 0.11") + } + } + return nil +} + +func NewClient(c client.Client) (*Client, error) { + if err := validateMinCaps(c); err != nil { + return nil, err + } + + bc := &Client{ + client: c, + bopts: c.BuildOpts(), // avoid grpc on every call + } + + if err := bc.init(); err != nil { + return nil, err + } + + return bc, nil +} + +func (bc *Client) BuildOpts() client.BuildOpts { + return bc.bopts +} + +func (bc *Client) init() error { + opts := bc.bopts.Opts + + defaultBuildPlatform := platforms.Normalize(platforms.DefaultSpec()) + if workers := bc.bopts.Workers; len(workers) > 0 && 
len(workers[0].Platforms) > 0 { + defaultBuildPlatform = workers[0].Platforms[0] + } + buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} + targetPlatforms := []ocispecs.Platform{} + if v := opts[keyTargetPlatform]; v != "" { + var err error + targetPlatforms, err = parsePlatforms(v) + if err != nil { + return err + } + } + bc.BuildPlatforms = buildPlatforms + bc.TargetPlatforms = targetPlatforms + + resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) + if err != nil { + return err + } + bc.ImageResolveMode = resolveMode + + extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) + if err != nil { + return errors.Wrap(err, "failed to parse additional hosts") + } + bc.ExtraHosts = extraHosts + + shmSize, err := parseShmSize(opts[keyShmSize]) + if err != nil { + return errors.Wrap(err, "failed to parse shm size") + } + bc.ShmSize = shmSize + + ulimits, err := parseUlimits(opts[keyUlimit]) + if err != nil { + return errors.Wrap(err, "failed to parse ulimit") + } + bc.Ulimits = ulimits + + defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) + if err != nil { + return err + } + bc.NetworkMode = defaultNetMode + + var ignoreCache []string + if v, ok := opts[keyNoCache]; ok { + if v == "" { + ignoreCache = []string{} // means all stages + } else { + ignoreCache = strings.Split(v, ",") + } + } + bc.ignoreCache = ignoreCache + + multiPlatform := len(targetPlatforms) > 1 + if v := opts[keyMultiPlatformArg]; v != "" { + opts[keyMultiPlatform] = v + } + if v := opts[keyMultiPlatform]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return errors.Errorf("invalid boolean value for multi-platform: %s", v) + } + if !b && multiPlatform { + return errors.Errorf("conflicting config: returning multiple target platforms is not allowed") + } + multiPlatform = b + } + bc.MultiPlatformRequested = multiPlatform + + var cacheImports []client.CacheOptionsEntry + // new API + if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { + var cacheImportsUM []controlapi.CacheOptionsEntry + if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { + return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) + } + for _, um := range cacheImportsUM { + cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) + } + } + // old API + if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { + cacheFrom := strings.Split(cacheFromStr, ",") + for _, s := range cacheFrom { + im := client.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": s, + }, + } + // FIXME(AkihiroSuda): skip append if already exists + cacheImports = append(cacheImports, im) + } + } + bc.CacheImports = cacheImports + + epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) + if err != nil { + return err + } + bc.Epoch = epoch + + attests, err := attestations.Parse(opts) + if err != nil { + return err + } + if attrs, ok := attests[attestations.KeyTypeSbom]; ok { + src, ok := attrs["generator"] + if !ok { + return errors.Errorf("sbom scanner cannot be empty") + } + ref, err := reference.ParseNormalizedNamed(src) + if err != nil { + return errors.Wrapf(err, "failed to parse sbom scanner %s", src) + } + ref = reference.TagNameOnly(ref) + bc.SBOM = &SBOM{ + Generator: ref.String(), + } + } + + bc.BuildArgs = filter(opts, buildArgPrefix) + bc.Labels = filter(opts, labelPrefix) + bc.CacheIDNamespace = opts[keyCacheNSArg] + bc.CgroupParent = opts[keyCgroupParent] + bc.Target = 
opts[keyTarget] + + if v, ok := opts[keyHostnameArg]; ok && len(v) > 0 { + opts[keyHostname] = v + } + bc.Hostname = opts[keyHostname] + return nil +} + +func (bc *Client) buildContext(ctx context.Context) (*buildContext, error) { + return bc.g.Do(ctx, "initcontext", func(ctx context.Context) (*buildContext, error) { + if bc.bctx != nil { + return bc.bctx, nil + } + bctx, err := bc.initContext(ctx) + if err == nil { + bc.bctx = bctx + } + return bctx, err + }) +} + +func (bc *Client) ReadEntrypoint(ctx context.Context, lang string, opts ...llb.LocalOption) (*Source, error) { + bctx, err := bc.buildContext(ctx) + if err != nil { + return nil, err + } + + var src *llb.State + + if !bctx.forceLocalDockerfile { + if bctx.dockerfile != nil { + src = bctx.dockerfile + } + } + + if src == nil { + name := "load build definition from " + bctx.filename + + filenames := []string{bctx.filename, bctx.filename + ".dockerignore"} + + // dockerfile is also supported casing moby/moby#10858 + if path.Base(bctx.filename) == DefaultDockerfileName { + filenames = append(filenames, path.Join(path.Dir(bctx.filename), strings.ToLower(DefaultDockerfileName))) + } + + opts = append([]llb.LocalOption{ + llb.FollowPaths(filenames), + llb.SessionID(bc.bopts.SessionID), + llb.SharedKeyHint(bctx.dockerfileLocalName), + WithInternalName(name), + llb.Differ(llb.DiffNone, false), + }, opts...) + + lsrc := llb.Local(bctx.dockerfileLocalName, opts...) + src = &lsrc + } + + def, err := src.Marshal(ctx, bc.marshalOpts()...) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal local source") + } + + defVtx, err := def.Head() + if err != nil { + return nil, err + } + + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve dockerfile") + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: bctx.filename, + }) + if err != nil { + if path.Base(bctx.filename) == DefaultDockerfileName { + var err1 error + dt, err1 = ref.ReadFile(ctx, client.ReadRequest{ + Filename: path.Join(path.Dir(bctx.filename), strings.ToLower(DefaultDockerfileName)), + }) + if err1 == nil { + err = nil + } + } + if err != nil { + return nil, errors.Wrapf(err, "failed to read dockerfile") + } + } + smap := llb.NewSourceMap(src, bctx.filename, lang, dt) + smap.Definition = def + + dt, err = ref.ReadFile(ctx, client.ReadRequest{ + Filename: bctx.filename + ".dockerignore", + }) + if err == nil { + bc.dockerignore = dt + } + + return &Source{ + SourceMap: smap, + Warn: func(ctx context.Context, msg string, opts client.WarnOpts) { + if opts.Level == 0 { + opts.Level = 1 + } + if opts.SourceInfo == nil { + opts.SourceInfo = &pb.SourceInfo{ + Data: smap.Data, + Filename: smap.Filename, + Language: smap.Language, + Definition: smap.Definition.ToPB(), + } + } + bc.client.Warn(ctx, defVtx, msg, opts) + }, + }, nil +} + +func (bc *Client) MainContext(ctx context.Context, opts ...llb.LocalOption) (*llb.State, error) { + bctx, err := bc.buildContext(ctx) + if err != nil { + return nil, err + } + + if bctx.context != nil { + return bctx.context, nil + } + + if bc.dockerignore == nil { + st := llb.Local(bctx.contextLocalName, + llb.SessionID(bc.bopts.SessionID), + llb.FollowPaths([]string{DefaultDockerignoreName}), + llb.SharedKeyHint(bctx.contextLocalName+"-"+DefaultDockerignoreName), + WithInternalName("load "+DefaultDockerignoreName), + llb.Differ(llb.DiffNone, false), + ) + 
def, err := st.Marshal(ctx, bc.marshalOpts()...) + if err != nil { + return nil, err + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + dt, _ := ref.ReadFile(ctx, client.ReadRequest{ // ignore error + Filename: DefaultDockerignoreName, + }) + if dt == nil { + dt = []byte{} + } + bc.dockerignore = dt + } + + var excludes []string + if len(bc.dockerignore) != 0 { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(bc.dockerignore)) + if err != nil { + return nil, errors.Wrap(err, "failed to parse dockerignore") + } + } + + opts = append([]llb.LocalOption{ + llb.SessionID(bc.bopts.SessionID), + llb.ExcludePatterns(excludes), + llb.SharedKeyHint(bctx.contextLocalName), + WithInternalName("load build context"), + }, opts...) + + st := llb.Local(bctx.contextLocalName, opts...) + + return &st, nil +} + +func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) (*llb.State, *image.Image, error) { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "invalid context name %s", name) + } + name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") + + pp := platforms.DefaultSpec() + if opt.Platform != nil { + pp = *opt.Platform + } + pname := name + "::" + platforms.Format(platforms.Normalize(pp)) + st, img, err := bc.namedContext(ctx, name, pname, opt) + if err != nil { + return nil, nil, err + } + if st != nil { + return st, img, nil + } + return bc.namedContext(ctx, name, name, opt) +} + +func (bc *Client) IsNoCache(name string) bool { + if len(bc.ignoreCache) == 0 { + return bc.ignoreCache != nil + } + for _, n := range bc.ignoreCache { + if strings.EqualFold(n, name) { + return true + } + } + return false +} + +func WithInternalName(name string) llb.ConstraintsOpt { + return llb.WithCustomName("[internal] " + name) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/context.go b/vendor/github.com/moby/buildkit/frontend/dockerui/context.go new file mode 100644 index 0000000000..3173558fd6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/context.go @@ -0,0 +1,194 @@ +package dockerui + +import ( + "archive/tar" + "bytes" + "context" + "path/filepath" + "regexp" + "strconv" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/gateway/client" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/util/gitutil" + "github.com/pkg/errors" +) + +const ( + DefaultLocalNameContext = "context" + DefaultLocalNameDockerfile = "dockerfile" + DefaultDockerfileName = "Dockerfile" + DefaultDockerignoreName = ".dockerignore" + EmptyImageName = "scratch" +) + +const ( + keyFilename = "filename" + keyContextSubDir = "contextsubdir" + keyNameContext = "contextkey" + keyNameDockerfile = "dockerfilekey" +) + +var httpPrefix = regexp.MustCompile(`^https?://`) + +type buildContext struct { + context *llb.State // set if not local + dockerfile *llb.State // override remoteContext if set + contextLocalName string + dockerfileLocalName string + filename string + forceLocalDockerfile bool +} + +func (bc *Client) marshalOpts() []llb.ConstraintsOpt { + return []llb.ConstraintsOpt{llb.WithCaps(bc.bopts.Caps)} +} + +func (bc *Client) initContext(ctx context.Context) (*buildContext, error) { + opts := bc.bopts.Opts + gwcaps := bc.bopts.Caps + + localNameContext := DefaultLocalNameContext + if v, ok := 
opts[keyNameContext]; ok { + localNameContext = v + } + + bctx := &buildContext{ + contextLocalName: DefaultLocalNameContext, + dockerfileLocalName: DefaultLocalNameDockerfile, + filename: DefaultDockerfileName, + } + + if v, ok := opts[keyFilename]; ok { + bctx.filename = v + } + + if v, ok := opts[keyNameDockerfile]; ok { + bctx.forceLocalDockerfile = true + bctx.dockerfileLocalName = v + } + + keepGit := false + if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { + keepGit = v + } + if st, ok := DetectGitContext(opts[localNameContext], keepGit); ok { + bctx.context = st + bctx.dockerfile = st + } else if st, filename, ok := DetectHTTPContext(opts[localNameContext]); ok { + def, err := st.Marshal(ctx, bc.marshalOpts()...) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal httpcontext") + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve httpcontext") + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: filename, + Range: &client.FileRange{ + Length: 1024, + }, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to read downloaded context") + } + if isArchive(dt) { + bc := llb.Scratch().File(llb.Copy(*st, filepath.Join("/", filename), "/", &llb.CopyInfo{ + AttemptUnpack: true, + })) + bctx.context = &bc + } else { + bctx.filename = filename + bctx.context = st + } + bctx.dockerfile = bctx.context + } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { + inputs, err := bc.client.Inputs(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to get frontend inputs") + } + + if !bctx.forceLocalDockerfile { + inputDockerfile, ok := inputs[bctx.dockerfileLocalName] + if ok { + bctx.dockerfile = &inputDockerfile + } + } + + inputCtx, ok := inputs[DefaultLocalNameContext] + if ok { + bctx.context = &inputCtx + } + } + + if bctx.context != nil { + if sub, ok := opts[keyContextSubDir]; ok { + bctx.context = scopeToSubDir(bctx.context, sub) + } + } + + return bctx, nil +} + +func DetectGitContext(ref string, keepGit bool) (*llb.State, bool) { + g, err := gitutil.ParseGitRef(ref) + if err != nil { + return nil, false + } + commit := g.Commit + if g.SubDir != "" { + commit += ":" + g.SubDir + } + gitOpts := []llb.GitOption{WithInternalName("load git source " + ref)} + if keepGit { + gitOpts = append(gitOpts, llb.KeepGitDir()) + } + + st := llb.Git(g.Remote, commit, gitOpts...) 
+ return &st, true +} + +func DetectHTTPContext(ref string) (*llb.State, string, bool) { + filename := "context" + if httpPrefix.MatchString(ref) { + st := llb.HTTP(ref, llb.Filename(filename), WithInternalName("load remote build context")) + return &st, filename, true + } + return nil, "", false +} + +func isArchive(header []byte) bool { + for _, m := range [][]byte{ + {0x42, 0x5A, 0x68}, // bzip2 + {0x1F, 0x8B, 0x08}, // gzip + {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz + } { + if len(header) < len(m) { + continue + } + if bytes.Equal(m, header[:len(m)]) { + return true + } + } + + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func scopeToSubDir(c *llb.State, dir string) *llb.State { + bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ + CopyDirContentsOnly: true, + })) + return &bc +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go new file mode 100644 index 0000000000..6a441c5082 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go @@ -0,0 +1,253 @@ +package dockerui + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/util/imageutil" + "github.com/pkg/errors" +) + +const ( + contextPrefix = "context:" + inputMetadataPrefix = "input-metadata:" + maxContextRecursion = 10 +) + +func (bc *Client) namedContext(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt) (*llb.State, *image.Image, error) { + return bc.namedContextRecursive(ctx, name, nameWithPlatform, opt, 0) +} + +func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt, count int) (*llb.State, *image.Image, error) { + opts := bc.bopts.Opts + v, ok := opts[contextPrefix+nameWithPlatform] + if !ok { + return nil, nil, nil + } + + if count > maxContextRecursion { + return nil, nil, errors.New("context recursion limit exceeded; this may indicate a cycle in the provided source policies: " + v) + } + + vv := strings.SplitN(v, ":", 2) + if len(vv) != 2 { + return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, nameWithPlatform) + } + + // allow git@ without protocol for SSH URLs for backwards compatibility + if strings.HasPrefix(vv[0], "git@") { + vv[0] = "git" + } + switch vv[0] { + case "docker-image": + ref := strings.TrimPrefix(vv[1], "//") + if ref == EmptyImageName { + st := llb.Scratch() + return &st, nil, nil + } + + imgOpt := []llb.ImageOption{ + llb.WithCustomName("[context " + nameWithPlatform + "] " + ref), + } + if opt.Platform != nil { + imgOpt = append(imgOpt, llb.Platform(*opt.Platform)) + } + + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, nil, err + } + + named = reference.TagNameOnly(named) + + ref, dgst, data, err := bc.client.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ + Platform: opt.Platform, + ResolveMode: opt.ResolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, ref), + ResolverType: llb.ResolverTypeRegistry, + }) + if err != nil { + e := &imageutil.ResolveToNonImageError{} 
+ if errors.As(err, &e) { + return bc.namedContextRecursive(ctx, e.Updated, name, opt, count+1) + } + return nil, nil, err + } + + var img image.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, err + } + img.Created = nil + + st := llb.Image(ref, imgOpt...) + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + if opt.CaptureDigest != nil { + *opt.CaptureDigest = dgst + } + return &st, &img, nil + case "git": + st, ok := DetectGitContext(v, true) + if !ok { + return nil, nil, errors.Errorf("invalid git context %s", v) + } + return st, nil, nil + case "http", "https": + st, ok := DetectGitContext(v, true) + if !ok { + httpst := llb.HTTP(v, llb.WithCustomName("[context "+nameWithPlatform+"] "+v)) + st = &httpst + } + return st, nil, nil + case "oci-layout": + refSpec := strings.TrimPrefix(vv[1], "//") + ref, err := reference.Parse(refSpec) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) + } + named, ok := ref.(reference.Named) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) + } + dgstd, ok := named.(reference.Digested) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) + } + + // for the dummy ref primarily used in log messages, we can use the + // original name, since the store key may not be significant + dummyRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) + } + dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) + } + + // TODO: How should source policy be handled here with a dummy ref? 
+ _, dgst, data, err := bc.client.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ + Platform: opt.Platform, + ResolveMode: opt.ResolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, dummyRef.String()), + ResolverType: llb.ResolverTypeOCILayout, + Store: llb.ResolveImageConfigOptStore{ + SessionID: bc.bopts.SessionID, + StoreID: named.Name(), + }, + }) + if err != nil { + return nil, nil, err + } + + var img image.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") + } + + ociOpt := []llb.OCILayoutOption{ + llb.WithCustomName("[context " + nameWithPlatform + "] OCI load from client"), + llb.OCIStore(bc.bopts.SessionID, named.Name()), + } + if opt.Platform != nil { + ociOpt = append(ociOpt, llb.Platform(*opt.Platform)) + } + st := llb.OCILayout( + dummyRef.String(), + ociOpt..., + ) + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + if opt.CaptureDigest != nil { + *opt.CaptureDigest = dgst + } + return &st, &img, nil + case "local": + st := llb.Local(vv[1], + llb.SessionID(bc.bopts.SessionID), + llb.FollowPaths([]string{DefaultDockerignoreName}), + llb.SharedKeyHint("context:"+nameWithPlatform+"-"+DefaultDockerignoreName), + llb.WithCustomName("[context "+nameWithPlatform+"] load "+DefaultDockerignoreName), + llb.Differ(llb.DiffNone, false), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, nil, err + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Evaluate: true, + Definition: def.ToPB(), + }) + if err != nil { + return nil, nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, nil, err + } + var excludes []string + if !opt.NoDockerignore { + dt, _ := ref.ReadFile(ctx, client.ReadRequest{ + Filename: DefaultDockerignoreName, + }) // error ignored + + if len(dt) != 0 { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) + if err != nil { + return nil, nil, err + } + } + } + st = llb.Local(vv[1], + llb.WithCustomName("[context "+nameWithPlatform+"] load from client"), + llb.SessionID(bc.bopts.SessionID), + llb.SharedKeyHint("context:"+nameWithPlatform), + llb.ExcludePatterns(excludes), + ) + return &st, nil, nil + case "input": + inputs, err := bc.client.Inputs(ctx) + if err != nil { + return nil, nil, err + } + st, ok := inputs[vv[1]] + if !ok { + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], nameWithPlatform) + } + md, ok := opts[inputMetadataPrefix+vv[1]] + if ok { + m := make(map[string][]byte) + if err := json.Unmarshal([]byte(md), &m); err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) + } + var img *image.Image + if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { + st, err = st.WithImageConfig(dtic) + if err != nil { + return nil, nil, err + } + if err := json.Unmarshal(dtic, &img); err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", nameWithPlatform) + } + } + return &st, img, nil + } + return &st, nil, nil + default: + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], nameWithPlatform) + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go new file mode 100644 index 0000000000..7900a0c7a5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go @@ -0,0 +1,91 @@ +package dockerui + +import ( + "bytes" + "context" + 
"encoding/json" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" + "github.com/moby/buildkit/solver/errdefs" +) + +const ( + keyRequestID = "requestid" +) + +type RequestHandler struct { + Outline func(context.Context) (*outline.Outline, error) + ListTargets func(context.Context) (*targets.List, error) + AllowOther bool +} + +func (bc *Client) HandleSubrequest(ctx context.Context, h RequestHandler) (*client.Result, bool, error) { + req, ok := bc.bopts.Opts[keyRequestID] + if !ok { + return nil, false, nil + } + switch req { + case subrequests.RequestSubrequestsDescribe: + res, err := describe(h) + return res, true, err + case outline.SubrequestsOutlineDefinition.Name: + if f := h.Outline; f != nil { + o, err := f(ctx) + if err != nil { + return nil, false, err + } + if o == nil { + return nil, true, nil + } + res, err := o.ToResult() + return res, true, err + } + case targets.SubrequestsTargetsDefinition.Name: + if f := h.ListTargets; f != nil { + targets, err := f(ctx) + if err != nil { + return nil, false, err + } + if targets == nil { + return nil, true, nil + } + res, err := targets.ToResult() + return res, true, err + } + } + if h.AllowOther { + return nil, false, nil + } + return nil, false, errdefs.NewUnsupportedSubrequestError(req) +} + +func describe(h RequestHandler) (*client.Result, error) { + all := []subrequests.Request{} + if h.Outline != nil { + all = append(all, outline.SubrequestsOutlineDefinition) + } + if h.ListTargets != nil { + all = append(all, targets.SubrequestsTargetsDefinition) + } + all = append(all, subrequests.SubrequestsDescribeDefinition) + dt, err := json.MarshalIndent(all, "", " ") + if err != nil { + return nil, err + } + + b := bytes.NewBuffer(nil) + if err := subrequests.PrintDescribe(dt, b); err != nil { + return nil, err + } + + res := client.NewResult() + res.Metadata = map[string][]byte{ + "result.json": dt, + "result.txt": b.Bytes(), + "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), + } + return res, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go index 024ac80204..fb89a8414a 100644 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -22,7 +22,7 @@ type Frontend interface { type FrontendLLBBridge interface { Solve(ctx context.Context, req SolveRequest, sid string) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) Warn(ctx context.Context, dgst digest.Digest, msg string, opts WarnOpts) error } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go index 7b6b9de132..23585de907 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -27,7 +27,7 @@ func NewResult() *Result { type Client interface { Solve(ctx context.Context, req SolveRequest) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, 
opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) BuildOpts() BuildOpts Inputs(ctx context.Context) (map[string]llb.State, error) NewContainer(ctx context.Context, req NewContainerRequest) (Container, error) @@ -38,6 +38,7 @@ type Client interface { // new container, without defining the initial process. type NewContainerRequest struct { Mounts []Mount + Hostname string NetMode pb.NetMode ExtraHosts []*pb.HostIP Platform *pb.Platform @@ -70,6 +71,7 @@ type Container interface { type StartRequest struct { Args []string Env []string + SecretEnv []*pb.SecretEnv User string Cwd string Tty bool diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/container.go b/vendor/github.com/moby/buildkit/frontend/gateway/container/container.go similarity index 87% rename from vendor/github.com/moby/buildkit/frontend/gateway/container.go rename to vendor/github.com/moby/buildkit/frontend/gateway/container/container.go index d6161d1def..af6476e7fc 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/container.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container/container.go @@ -1,4 +1,4 @@ -package gateway +package container import ( "context" @@ -10,7 +10,9 @@ import ( "sync" "syscall" + "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" @@ -18,6 +20,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/pb" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/stack" utilsystem "github.com/moby/buildkit/util/system" @@ -29,6 +32,7 @@ import ( type NewContainerRequest struct { ContainerID string NetMode opspb.NetMode + Hostname string ExtraHosts []executor.HostIP Mounts []Mount Platform *opspb.Platform @@ -56,9 +60,12 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s ctr := &gatewayContainer{ id: req.ContainerID, netMode: req.NetMode, + hostname: req.Hostname, extraHosts: req.ExtraHosts, platform: platform, executor: w.Executor(), + sm: sm, + group: g, errGroup: eg, ctx: ctx, cancel: cancel, @@ -86,7 +93,7 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s cm = refs[m.Input].Worker.CacheManager() } return cm.New(ctx, ref, g) - }) + }, platform.OS) if err != nil { for i := len(p.Actives) - 1; i >= 0; i-- { // call in LIFO order p.Actives[i].Ref.Release(context.TODO()) @@ -136,7 +143,7 @@ type MountMutableRef struct { type MakeMutable func(m *opspb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) -func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manager, g session.Group, cwd string, mnts []*opspb.Mount, refs []*worker.WorkerRef, makeMutable MakeMutable) (p PreparedMounts, err error) { +func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manager, g session.Group, cwd string, mnts []*opspb.Mount, refs []*worker.WorkerRef, makeMutable MakeMutable, platform string) (p PreparedMounts, err error) { // loop over all mounts, fill in mounts, root and outputs for i, m := range mnts { var ( @@ -255,11 +262,11 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage }) root = active } - p.Root = mountWithSession(root, g) + p.Root = MountWithSession(root, g) } else { - mws := mountWithSession(mountable, g) + mws := MountWithSession(mountable, g) dest := 
m.Dest - if !filepath.IsAbs(filepath.Clean(dest)) { + if !system.IsAbs(filepath.Clean(dest), platform) { dest = filepath.Join("/", cwd, dest) } mws.Dest = dest @@ -280,11 +287,14 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage type gatewayContainer struct { id string netMode opspb.NetMode + hostname string extraHosts []executor.HostIP platform opspb.Platform rootFS executor.Mount mounts []executor.Mount executor executor.Executor + sm *session.Manager + group session.Group started bool errGroup *errgroup.Group mu sync.Mutex @@ -304,6 +314,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques Cwd: req.Cwd, Tty: req.Tty, NetMode: gwCtr.netMode, + Hostname: gwCtr.hostname, ExtraHosts: gwCtr.extraHosts, SecurityMode: req.SecurityMode, RemoveMountStubsRecursive: req.RemoveMountStubsRecursive, @@ -322,6 +333,12 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "TERM", "xterm") } + secretEnv, err := gwCtr.loadSecretEnv(ctx, req.SecretEnv) + if err != nil { + return nil, err + } + procInfo.Meta.Env = append(procInfo.Meta.Env, secretEnv...) + // mark that we have started on the first call to execProcess for this // container, so that future calls will call Exec rather than Run gwCtr.mu.Lock() @@ -341,7 +358,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques startedCh := make(chan struct{}) gwProc.errGroup.Go(func() error { bklog.G(gwCtr.ctx).Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args) - err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) + _, err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) return stack.Enable(err) }) select { @@ -361,6 +378,33 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques return gwProc, nil } +func (gwCtr *gatewayContainer) loadSecretEnv(ctx context.Context, secretEnv []*pb.SecretEnv) ([]string, error) { + out := make([]string, 0, len(secretEnv)) + for _, sopt := range secretEnv { + id := sopt.ID + if id == "" { + return nil, errors.Errorf("secret ID missing for %q environment variable", sopt.Name) + } + var dt []byte + var err error + err = gwCtr.sm.Any(ctx, gwCtr.group, func(ctx context.Context, _ string, caller session.Caller) error { + dt, err = secrets.GetSecret(ctx, caller, id) + if err != nil { + if errors.Is(err, secrets.ErrNotFound) && sopt.Optional { + return nil + } + return err + } + return nil + }) + if err != nil { + return nil, err + } + out = append(out, fmt.Sprintf("%s=%s", sopt.Name, string(dt))) + } + return out, nil +} + func (gwCtr *gatewayContainer) Release(ctx context.Context) error { gwCtr.mu.Lock() defer gwCtr.mu.Unlock() @@ -458,7 +502,7 @@ func addDefaultEnvvar(env []string, k, v string) []string { return append(env, k+"="+v) } -func mountWithSession(m cache.Mountable, g session.Group) executor.Mount { +func MountWithSession(m cache.Mountable, g session.Group) executor.Mount { _, readonly := m.(cache.ImmutableRef) return executor.Mount{ Src: &mountable{m: m, g: g}, diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/util.go b/vendor/github.com/moby/buildkit/frontend/gateway/container/util.go similarity index 96% rename from vendor/github.com/moby/buildkit/frontend/gateway/util.go rename to vendor/github.com/moby/buildkit/frontend/gateway/container/util.go index 0de8353402..1a1fb25138 100644 --- 
a/vendor/github.com/moby/buildkit/frontend/gateway/util.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container/util.go @@ -1,4 +1,4 @@ -package gateway +package container import ( "net" diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go index e13894ba37..0f4da47cbd 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -7,8 +7,8 @@ import ( cacheutil "github.com/moby/buildkit/cache/util" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/gateway/container" gwpb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -26,8 +26,8 @@ import ( "golang.org/x/sync/errgroup" ) -func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, w worker.Infos, sid string, sm *session.Manager) (*bridgeClient, error) { - bc := &bridgeClient{ +func LLBBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, w worker.Infos, sid string, sm *session.Manager) (*BridgeClient, error) { + bc := &BridgeClient{ opts: opts, inputs: inputs, FrontendLLBBridge: llbBridge, @@ -40,7 +40,7 @@ func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLL return bc, nil } -type bridgeClient struct { +type BridgeClient struct { frontend.FrontendLLBBridge mu sync.Mutex opts map[string]string @@ -54,7 +54,7 @@ type bridgeClient struct { ctrs []client.Container } -func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { +func (c *BridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ Evaluate: req.Evaluate, Definition: req.Definition, @@ -91,7 +91,7 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli return cRes, nil } -func (c *bridgeClient) loadBuildOpts() client.BuildOpts { +func (c *BridgeClient) loadBuildOpts() client.BuildOpts { wis := c.workers.WorkerInfos() workers := make([]client.WorkerInfo, len(wis)) for i, w := range wis { @@ -112,11 +112,11 @@ func (c *bridgeClient) loadBuildOpts() client.BuildOpts { } } -func (c *bridgeClient) BuildOpts() client.BuildOpts { +func (c *BridgeClient) BuildOpts() client.BuildOpts { return c.buildOpts } -func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { +func (c *BridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { inputs := make(map[string]llb.State) for key, def := range c.inputs { defop, err := llb.NewDefinitionOp(def) @@ -128,7 +128,7 @@ func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) return inputs, nil } -func (c *bridgeClient) wrapSolveError(solveErr error) error { +func (c *BridgeClient) wrapSolveError(solveErr error) error { var ( ee *llberrdefs.ExecError fae *llberrdefs.FileActionError @@ -162,7 +162,7 @@ func (c *bridgeClient) wrapSolveError(solveErr error) error { return errdefs.WithSolveError(solveErr, subject, inputIDs, mountIDs) } -func (c *bridgeClient) registerResultIDs(results 
...solver.Result) (ids []string, err error) { +func (c *BridgeClient) registerResultIDs(results ...solver.Result) (ids []string, err error) { c.mu.Lock() defer c.mu.Unlock() @@ -181,7 +181,7 @@ func (c *bridgeClient) registerResultIDs(results ...solver.Result) (ids []string return ids, nil } -func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { +func (c *BridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { if r == nil { return nil, nil } @@ -206,7 +206,7 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err return res, nil } -func (c *bridgeClient) discard(err error) { +func (c *BridgeClient) discard(err error) { for _, ctr := range c.ctrs { ctr.Release(context.TODO()) } @@ -227,15 +227,16 @@ func (c *bridgeClient) discard(err error) { } } -func (c *bridgeClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { +func (c *BridgeClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { return c.FrontendLLBBridge.Warn(ctx, dgst, msg, opts) } -func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { - ctrReq := gateway.NewContainerRequest{ +func (c *BridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { + ctrReq := container.NewContainerRequest{ ContainerID: identity.NewID(), NetMode: req.NetMode, - Mounts: make([]gateway.Mount, len(req.Mounts)), + Hostname: req.Hostname, + Mounts: make([]container.Mount, len(req.Mounts)), } eg, ctx := errgroup.WithContext(ctx) @@ -266,7 +267,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return errors.Errorf("failed to find ref %s for %q mount", m.ResultID, m.Dest) } } - ctrReq.Mounts[i] = gateway.Mount{ + ctrReq.Mounts[i] = container.Mount{ WorkerRef: workerRef, Mount: &opspb.Mount{ Dest: m.Dest, @@ -287,7 +288,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return nil, err } - ctrReq.ExtraHosts, err = gateway.ParseExtraHosts(req.ExtraHosts) + ctrReq.ExtraHosts, err = container.ParseExtraHosts(req.ExtraHosts) if err != nil { return nil, err } @@ -298,7 +299,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer } group := session.NewGroup(c.sid) - ctr, err := gateway.NewContainer(ctx, w, c.sm, group, ctrReq) + ctr, err := container.NewContainer(ctx, w, c.sm, group, ctrReq) if err != nil { return nil, err } @@ -306,7 +307,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return ctr, nil } -func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { +func (c *BridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { return &ref{resultProxy: r, session: s, c: c}, nil } @@ -315,7 +316,7 @@ type ref struct { resultProxyClones []solver.ResultProxy session session.Group - c *bridgeClient + c *BridgeClient } func (r *ref) acquireResultProxy() solver.ResultProxy { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go index 7cd25a0e8e..ae144162c9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go @@ -23,7 +23,7 @@ type GatewayForwarder struct { } func (gf *GatewayForwarder) Solve(ctx context.Context, 
llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (retRes *frontend.Result, retErr error) { - c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) + c, err := LLBBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index 79825d0b65..eb1fb2a2b0 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -27,8 +27,12 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/dockerui" gwclient "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/gateway/container" + "github.com/moby/buildkit/frontend/gateway/forwarder" pb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -89,7 +93,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } _, isDevel := opts[keyDevel] - var img ocispecs.Image + var img image.Image var mfstDigest digest.Digest var rootFS cache.MutableRef var readonly bool // TODO: try to switch to read-only by default. @@ -137,31 +141,57 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } } } else { - sourceRef, err := reference.ParseNormalizedNamed(source) + c, err := forwarder.LLBBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) if err != nil { return nil, err } - - dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{}) + dc, err := dockerui.NewClient(c) if err != nil { return nil, err } - mfstDigest = dgst - - if err := json.Unmarshal(config, &img); err != nil { + st, dockerImage, err := dc.NamedContext(ctx, source, dockerui.ContextOpt{ + CaptureDigest: &mfstDigest, + }) + if err != nil { return nil, err } - - if dgst != "" { - sourceRef, err = reference.WithDigest(sourceRef, dgst) + if dockerImage != nil { + img = *dockerImage + } + if st == nil { + sourceRef, err := reference.ParseNormalizedNamed(source) if err != nil { return nil, err } + + ref, dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{}) + if err != nil { + return nil, err + } + + sourceRef, err = reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + + mfstDigest = dgst + + if err := json.Unmarshal(config, &img); err != nil { + return nil, err + } + + if dgst != "" { + sourceRef, err = reference.WithDigest(sourceRef, dgst) + if err != nil { + return nil, err + } + } + + src := llb.Image(sourceRef.String(), &markTypeFrontend{}) + st = &src } - src := llb.Image(sourceRef.String(), &markTypeFrontend{}) - - def, err := src.Marshal(ctx) + def, err := st.Marshal(ctx) if err != nil { return nil, err } @@ -275,8 +305,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten mnts = append(mnts, *mdmnt) } - err = w.Executor().Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, 
Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) - + _, err = w.Executor().Run(ctx, "", container.MountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) if err != nil { if errdefs.IsCanceled(ctx, err) && lbf.isErrServerClosed { err = errors.Errorf("frontend grpc server closed unexpectedly") @@ -540,7 +569,7 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R OSFeatures: p.OSFeatures, } } - dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ + ref, dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ ResolverType: llb.ResolverType(req.ResolverType), Platform: platform, ResolveMode: req.ResolveMode, @@ -549,11 +578,13 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R SessionID: req.SessionID, StoreID: req.StoreID, }, + SourcePolicies: req.SourcePolicies, }) if err != nil { return nil, err } return &pb.ResolveImageConfigResponse{ + Ref: ref, Digest: dgst, Config: dt, }, nil @@ -954,9 +985,10 @@ func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewContainerRequest) (_ *pb.NewContainerResponse, err error) { bklog.G(ctx).Debugf("|<--- NewContainer %s", in.ContainerID) - ctrReq := NewContainerRequest{ + ctrReq := container.NewContainerRequest{ ContainerID: in.ContainerID, NetMode: in.Network, + Hostname: in.Hostname, Platform: in.Platform, Constraints: in.Constraints, } @@ -983,7 +1015,7 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta } } } - ctrReq.Mounts = append(ctrReq.Mounts, Mount{ + ctrReq.Mounts = append(ctrReq.Mounts, container.Mount{ WorkerRef: workerRef, Mount: &opspb.Mount{ Dest: m.Dest, @@ -1006,12 +1038,12 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta return nil, stack.Enable(err) } - ctrReq.ExtraHosts, err = ParseExtraHosts(in.ExtraHosts) + ctrReq.ExtraHosts, err = container.ParseExtraHosts(in.ExtraHosts) if err != nil { return nil, stack.Enable(err) } - ctr, err := NewContainer(context.Background(), w, lbf.sm, group, ctrReq) + ctr, err := container.NewContainer(context.Background(), w, lbf.sm, group, ctrReq) if err != nil { return nil, stack.Enable(err) } @@ -1175,7 +1207,21 @@ func (w *outputWriter) Write(msg []byte) (int, error) { return len(msg), stack.Enable(err) } +type execProcessServerThreadSafe struct { + pb.LLBBridge_ExecProcessServer + sendMu sync.Mutex +} + +func (w *execProcessServerThreadSafe) Send(m *pb.ExecMessage) error { + w.sendMu.Lock() + defer w.sendMu.Unlock() + return w.LLBBridge_ExecProcessServer.Send(m) +} + func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) error { + srv = &execProcessServerThreadSafe{ + LLBBridge_ExecProcessServer: srv, + } eg, ctx := errgroup.WithContext(srv.Context()) msgs := make(chan *pb.ExecMessage) @@ -1285,6 +1331,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e proc, err := ctr.Start(initCtx, gwclient.StartRequest{ Args: init.Meta.Args, Env: init.Meta.Env, + SecretEnv: init.Secretenv, User: init.Meta.User, Cwd: init.Meta.Cwd, Tty: init.Tty, @@ -1323,7 +1370,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e var statusError *rpc.Status if err != nil { statusCode = pb.UnknownExitStatus - st, _ := 
status.FromError(grpcerrors.ToGRPC(err)) + st, _ := status.FromError(grpcerrors.ToGRPC(ctx, err)) stp := st.Proto() statusError = &rpc.Status{ Code: stp.Code, diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go index 252617ffa0..524b3ba2a9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ -190,7 +190,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro } } if retError != nil { - st, _ := status.FromError(grpcerrors.ToGRPC(retError)) + st, _ := status.FromError(grpcerrors.ToGRPC(ctx, retError)) stp := st.Proto() req.Error = &rpc.Status{ Code: stp.Code, @@ -478,7 +478,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * return res, nil } -func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { +func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) { var p *opspb.Platform if platform := opt.Platform; platform != nil { p = &opspb.Platform{ @@ -489,19 +489,27 @@ func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb OSFeatures: platform.OSFeatures, } } + resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{ - ResolverType: int32(opt.ResolverType), - Ref: ref, - Platform: p, - ResolveMode: opt.ResolveMode, - LogName: opt.LogName, - SessionID: opt.Store.SessionID, - StoreID: opt.Store.StoreID, + ResolverType: int32(opt.ResolverType), + Ref: ref, + Platform: p, + ResolveMode: opt.ResolveMode, + LogName: opt.LogName, + SessionID: opt.Store.SessionID, + StoreID: opt.Store.StoreID, + SourcePolicies: opt.SourcePolicies, }) if err != nil { - return "", nil, err + return "", "", nil, err } - return resp.Digest, resp.Config, nil + newRef := resp.Ref + if newRef == "" { + // No ref returned, use the original one. + // This could occur if the version of buildkitd is too old. 
+ newRef = ref + } + return newRef, resp.Digest, resp.Config, nil } func (c *grpcClient) BuildOpts() client.BuildOpts { @@ -792,6 +800,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe Constraints: req.Constraints, Network: req.NetMode, ExtraHosts: req.ExtraHosts, + Hostname: req.Hostname, }) if err != nil { return nil, err @@ -805,6 +814,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe return &container{ client: c.client, + caps: c.caps, id: id, execMsgs: c.execMsgs, }, nil @@ -812,6 +822,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe type container struct { client pb.LLBBridgeClient + caps apicaps.CapSet id string execMsgs *messageForwarder } @@ -820,6 +831,12 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID()) msgs := ctr.execMsgs.Register(pid) + if len(req.SecretEnv) > 0 { + if err := ctr.caps.Supports(pb.CapGatewayExecSecretEnv); err != nil { + return nil, err + } + } + init := &pb.InitMessage{ ContainerID: ctr.id, Meta: &opspb.Meta{ @@ -828,8 +845,9 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien Cwd: req.Cwd, User: req.User, }, - Tty: req.Tty, - Security: req.SecurityMode, + Tty: req.Tty, + Security: req.SecurityMode, + Secretenv: req.SecretEnv, } init.Meta.RemoveMountStubsRecursive = req.RemoveMountStubsRecursive if req.Stdin != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go index deb192dc11..14c6c71ab0 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -44,6 +44,10 @@ const ( // /etc/hosts for containers created via gateway exec. CapGatewayExecExtraHosts apicaps.CapID = "gateway.exec.extrahosts" + // CapGatewayExecExtraHosts is the capability to set secrets as env vars for + // containers created via gateway exec. + CapGatewayExecSecretEnv apicaps.CapID = "gateway.exec.secretenv" + // CapGatewayExecExtraHosts is the capability to send signals to a process // created via gateway exec. 
CapGatewayExecSignals apicaps.CapID = "gateway.exec.signals" @@ -179,6 +183,13 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapGatewayExecSecretEnv, + Name: "gateway exec secret env", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapGatewayExecSignals, Name: "gateway exec signals", diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go index ec012f615c..d978bfa668 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go @@ -3,7 +3,7 @@ package moby_buildkit_v1_frontend //nolint:revive import ( "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index da36afdd14..4849adeea9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -736,16 +736,17 @@ func (m *InputsResponse) GetDefinitions() map[string]*pb.Definition { } type ResolveImageConfigRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` - ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` - ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` - SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` - StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` + ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` + LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` + SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } @@ -830,9 +831,17 @@ func (m *ResolveImageConfigRequest) GetStoreID() string { return "" } +func (m *ResolveImageConfigRequest) GetSourcePolicies() []*pb1.Policy { + if m != nil { + return m.SourcePolicies + } + return nil +} + type ResolveImageConfigResponse struct { Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` + 
Ref string `protobuf:"bytes,3,opt,name=Ref,proto3" json:"Ref,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -878,6 +887,13 @@ func (m *ResolveImageConfigResponse) GetConfig() []byte { return nil } +func (m *ResolveImageConfigResponse) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + type SolveRequest struct { Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` @@ -1823,6 +1839,7 @@ type NewContainerRequest struct { Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` ExtraHosts []*pb.HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1903,6 +1920,13 @@ func (m *NewContainerRequest) GetExtraHosts() []*pb.HostIP { return nil } +func (m *NewContainerRequest) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + type NewContainerResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2196,6 +2220,7 @@ type InitMessage struct { Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` + Secretenv []*pb.SecretEnv `protobuf:"bytes,6,rep,name=secretenv,proto3" json:"secretenv,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2269,6 +2294,13 @@ func (m *InitMessage) GetSecurity() pb.SecurityMode { return pb.SecurityMode_SANDBOX } +func (m *InitMessage) GetSecretenv() []*pb.SecretEnv { + if m != nil { + return m.Secretenv + } + return nil +} + type ExitMessage struct { Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` Error *rpc.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` @@ -2627,161 +2659,164 @@ func init() { func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 2452 bytes of a gzipped FileDescriptorProto + // 2497 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, - 0xf5, 0xd7, 0x8a, 0x14, 0x25, 0x3d, 0x52, 0x14, 0x3d, 0x76, 0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, - 0x63, 0x45, 0x56, 0x9c, 0xa5, 0xbf, 0xb2, 0x0d, 0xb9, 0x76, 0xeb, 0xc4, 0xfa, 0x05, 0x29, 0x96, - 0x6c, 0x76, 0xe4, 0xc2, 0x45, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xd6, 0xab, 0xdd, 0xed, 0xee, - 0xd0, 0x32, 0x93, 0x4b, 0x7b, 0x28, 0x50, 0xe4, 0xd4, 0x53, 0x6f, 0x41, 0x81, 0x16, 0xe8, 0xb9, - 0xfd, 0x03, 0xda, 0x73, 0x80, 0x5e, 0x7a, 0xee, 0x21, 0x28, 0xfc, 0x0f, 0xf4, 0x56, 0xa0, 0xb7, - 0xe2, 0xcd, 0xcc, 0x92, 0xc3, 0x1f, 0x5a, 0x92, 0xf5, 0x89, 0x33, 0x6f, 0xde, 0x8f, 0x79, 0xef, - 0xcd, 0x7b, 0xf3, 0x99, 0x25, 0x2c, 0xb5, 0x1c, 0xce, 0xce, 0x9c, 0x8e, 0x1d, 0xc5, 0x21, 0x0f, - 0xc9, 0xe5, 0xd3, 0xf0, 0xa4, 0x63, 0x9f, 0xb4, 0x3d, 0xdf, 0x7d, 0xe9, 0x71, 0xfb, 
0xd5, 0xff, - 0xdb, 0xcd, 0x38, 0x0c, 0x38, 0x0b, 0x5c, 0xf3, 0xe3, 0x96, 0xc7, 0x5f, 0xb4, 0x4f, 0xec, 0x46, - 0x78, 0x5a, 0x6b, 0x85, 0xad, 0xb0, 0x26, 0x24, 0x4e, 0xda, 0x4d, 0x31, 0x13, 0x13, 0x31, 0x92, - 0x9a, 0xcc, 0x8d, 0x41, 0xf6, 0x56, 0x18, 0xb6, 0x7c, 0xe6, 0x44, 0x5e, 0xa2, 0x86, 0xb5, 0x38, - 0x6a, 0xd4, 0x12, 0xee, 0xf0, 0x76, 0xa2, 0x64, 0x6e, 0x6a, 0x32, 0xb8, 0x91, 0x5a, 0xba, 0x91, - 0x5a, 0x12, 0xfa, 0xaf, 0x58, 0x5c, 0x8b, 0x4e, 0x6a, 0x61, 0x94, 0x72, 0xd7, 0xce, 0xe5, 0x76, - 0x22, 0xaf, 0xc6, 0x3b, 0x11, 0x4b, 0x6a, 0x67, 0x61, 0xfc, 0x92, 0xc5, 0x4a, 0xe0, 0xf6, 0xb9, - 0x02, 0x6d, 0xee, 0xf9, 0x28, 0xd5, 0x70, 0xa2, 0x04, 0x8d, 0xe0, 0xaf, 0x12, 0xd2, 0xdd, 0xe6, - 0x61, 0xe0, 0x25, 0xdc, 0xf3, 0x5a, 0x5e, 0xad, 0x99, 0x08, 0x19, 0x69, 0x05, 0x9d, 0x50, 0xec, - 0x77, 0x33, 0x5c, 0x68, 0xc7, 0x0d, 0x16, 0x85, 0xbe, 0xd7, 0xe8, 0xa0, 0x0d, 0x39, 0x92, 0x62, - 0xd6, 0xdf, 0xf2, 0x50, 0xa0, 0x2c, 0x69, 0xfb, 0x9c, 0xac, 0xc2, 0x52, 0xcc, 0x9a, 0x3b, 0x2c, - 0x8a, 0x59, 0xc3, 0xe1, 0xcc, 0xad, 0x1a, 0x2b, 0xc6, 0xda, 0xe2, 0xfe, 0x0c, 0xed, 0x27, 0x93, - 0x1f, 0x41, 0x39, 0x66, 0xcd, 0x44, 0x63, 0x9c, 0x5d, 0x31, 0xd6, 0x8a, 0x1b, 0x1f, 0xd9, 0xe7, - 0xe6, 0xd0, 0xa6, 0xac, 0x79, 0xe4, 0x44, 0x3d, 0x91, 0xfd, 0x19, 0x3a, 0xa0, 0x84, 0x6c, 0x40, - 0x2e, 0x66, 0xcd, 0x6a, 0x4e, 0xe8, 0xba, 0x92, 0xad, 0x6b, 0x7f, 0x86, 0x22, 0x33, 0xd9, 0x84, - 0x3c, 0x6a, 0xa9, 0xe6, 0x85, 0xd0, 0xd5, 0xb1, 0x1b, 0xd8, 0x9f, 0xa1, 0x42, 0x80, 0x3c, 0x86, - 0x85, 0x53, 0xc6, 0x1d, 0xd7, 0xe1, 0x4e, 0x15, 0x56, 0x72, 0x6b, 0xc5, 0x8d, 0x5a, 0xa6, 0x30, - 0x06, 0xc8, 0x3e, 0x52, 0x12, 0xbb, 0x01, 0x8f, 0x3b, 0xb4, 0xab, 0x80, 0x3c, 0x87, 0x92, 0xc3, - 0x39, 0xc3, 0x64, 0x78, 0x61, 0x90, 0x54, 0x4b, 0x42, 0xe1, 0xed, 0xf1, 0x0a, 0x1f, 0x69, 0x52, - 0x52, 0x69, 0x9f, 0x22, 0xf3, 0x01, 0x2c, 0xf5, 0xd9, 0x24, 0x15, 0xc8, 0xbd, 0x64, 0x1d, 0x99, - 0x18, 0x8a, 0x43, 0x72, 0x09, 0xe6, 0x5e, 0x39, 0x7e, 0x9b, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0xdc, - 0x9f, 0xbd, 0x67, 0x98, 0x2f, 0xe0, 0xc2, 0x90, 0xfe, 0x11, 0x0a, 0x7e, 0xa0, 0x2b, 0x28, 0x6e, - 0x7c, 0x98, 0xb1, 0x6b, 0x5d, 0x9d, 0x66, 0x69, 0x6b, 0x01, 0x0a, 0xb1, 0x70, 0xc8, 0xfa, 0xad, - 0x01, 0x95, 0xc1, 0x54, 0x93, 0x03, 0x95, 0x24, 0x43, 0x84, 0xe5, 0xee, 0x14, 0xa7, 0x04, 0x09, - 0x2a, 0x30, 0x42, 0x85, 0xb9, 0x09, 0x8b, 0x5d, 0xd2, 0xb8, 0x60, 0x2c, 0x6a, 0x5b, 0xb4, 0x36, - 0x21, 0x47, 0x59, 0x93, 0x94, 0x61, 0xd6, 0x53, 0xe7, 0x9a, 0xce, 0x7a, 0x2e, 0x59, 0x81, 0x9c, - 0xcb, 0x9a, 0xca, 0xf5, 0xb2, 0x1d, 0x9d, 0xd8, 0x3b, 0xac, 0xe9, 0x05, 0x1e, 0xba, 0x48, 0x71, - 0xc9, 0xfa, 0xbd, 0x81, 0xf5, 0x81, 0xdb, 0x22, 0x9f, 0xf4, 0xf9, 0x31, 0xfe, 0xb4, 0x0f, 0xed, - 0xfe, 0x79, 0xf6, 0xee, 0xef, 0xf4, 0x67, 0x62, 0x4c, 0x09, 0xe8, 0xde, 0xfd, 0x18, 0x4a, 0x7a, - 0x6e, 0xc8, 0x3e, 0x14, 0xb5, 0x73, 0xa4, 0x36, 0xbc, 0x3a, 0x59, 0x66, 0xa9, 0x2e, 0x6a, 0xfd, - 0x31, 0x07, 0x45, 0x6d, 0x91, 0x3c, 0x84, 0xfc, 0x4b, 0x2f, 0x90, 0x21, 0x2c, 0x6f, 0xac, 0x4f, - 0xa6, 0xf2, 0xb1, 0x17, 0xb8, 0x54, 0xc8, 0x91, 0xba, 0x56, 0x77, 0xb3, 0x62, 0x5b, 0x77, 0x26, - 0xd3, 0x71, 0x6e, 0xf1, 0xdd, 0x9a, 0xa2, 0x6d, 0xc8, 0xa6, 0x41, 0x20, 0x1f, 0x39, 0xfc, 0x85, - 0x68, 0x1a, 0x8b, 0x54, 0x8c, 0xc9, 0x2d, 0xb8, 0xe8, 0x05, 0xcf, 0x42, 0x1e, 0xd6, 0x63, 0xe6, - 0x7a, 0x78, 0xf8, 0x9e, 0x75, 0x22, 0x56, 0x9d, 0x13, 0x2c, 0xa3, 0x96, 0x48, 0x1d, 0xca, 0x92, - 0x7c, 0xdc, 0x3e, 0xf9, 0x19, 0x6b, 0xf0, 0xa4, 0x5a, 0x10, 0xfe, 0xac, 0x65, 0x6c, 0xe1, 0x40, - 0x17, 0xa0, 0x03, 0xf2, 0x6f, 0x55, 0xed, 0xd6, 0x9f, 0x0d, 0x58, 0xea, 0x53, 0x4f, 0x3e, 0xed, - 0x4b, 0xd5, 
0xcd, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0x28, 0xb8, 0x5e, 0x8b, 0x25, 0x5c, 0xa4, - 0x6a, 0x71, 0x6b, 0xe3, 0xdb, 0xef, 0xde, 0x9f, 0xf9, 0xc7, 0x77, 0xef, 0xaf, 0x6b, 0x57, 0x4d, - 0x18, 0xb1, 0xa0, 0x11, 0x06, 0xdc, 0xf1, 0x02, 0x16, 0xe3, 0x05, 0xfb, 0xb1, 0x14, 0xb1, 0x77, - 0xc4, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0x81, 0x73, 0xca, 0x44, 0x9e, 0x16, 0xa9, 0x18, 0x5b, 0x1c, - 0x96, 0x28, 0xe3, 0xed, 0x38, 0xa0, 0xec, 0xe7, 0x6d, 0x64, 0xfa, 0x5e, 0xda, 0x48, 0xc4, 0xa6, - 0xc7, 0x35, 0x74, 0x64, 0xa4, 0x4a, 0x80, 0xac, 0xc1, 0x1c, 0x8b, 0xe3, 0x30, 0x56, 0xc5, 0x43, - 0x6c, 0x79, 0xd5, 0xdb, 0x71, 0xd4, 0xb0, 0x8f, 0xc5, 0x55, 0x4f, 0x25, 0x83, 0x55, 0x81, 0x72, - 0x6a, 0x35, 0x89, 0xc2, 0x20, 0x61, 0xd6, 0x32, 0x86, 0x2e, 0x6a, 0xf3, 0x44, 0xed, 0xc3, 0xfa, - 0xab, 0x01, 0xe5, 0x94, 0x22, 0x79, 0xc8, 0x17, 0x50, 0xec, 0xb5, 0x86, 0xb4, 0x07, 0xdc, 0xcf, - 0x0c, 0xaa, 0x2e, 0xaf, 0xf5, 0x15, 0xd5, 0x12, 0x74, 0x75, 0xe6, 0x13, 0xa8, 0x0c, 0x32, 0x8c, - 0xc8, 0xfe, 0x07, 0xfd, 0x0d, 0x62, 0xb0, 0x5f, 0x69, 0xa7, 0xe1, 0x5f, 0x06, 0x5c, 0xa6, 0x4c, - 0x60, 0x97, 0x83, 0x53, 0xa7, 0xc5, 0xb6, 0xc3, 0xa0, 0xe9, 0xb5, 0xd2, 0x30, 0x57, 0x44, 0x33, - 0x4c, 0x35, 0x63, 0x5f, 0x5c, 0x83, 0x85, 0xba, 0xef, 0xf0, 0x66, 0x18, 0x9f, 0x2a, 0xe5, 0x25, - 0x54, 0x9e, 0xd2, 0x68, 0x77, 0x95, 0xac, 0x40, 0x51, 0x29, 0x3e, 0x0a, 0xdd, 0x34, 0x9d, 0x3a, - 0x89, 0x54, 0x61, 0xfe, 0x30, 0x6c, 0x3d, 0xc1, 0x64, 0xcb, 0x0a, 0x4b, 0xa7, 0xc4, 0x82, 0x92, - 0x62, 0x8c, 0xbb, 0xd5, 0x35, 0x47, 0xfb, 0x68, 0xe4, 0x3d, 0x58, 0x3c, 0x66, 0x49, 0xe2, 0x85, - 0xc1, 0xc1, 0x4e, 0xb5, 0x20, 0xe4, 0x7b, 0x04, 0xd4, 0x7d, 0xcc, 0xc3, 0x98, 0x1d, 0xec, 0x54, - 0xe7, 0xa5, 0x6e, 0x35, 0xb5, 0x7e, 0x61, 0x80, 0x39, 0xca, 0x63, 0x95, 0xbe, 0xcf, 0xa0, 0x20, - 0x0f, 0xa4, 0xf4, 0xfa, 0x7f, 0x3b, 0xca, 0xf2, 0x97, 0xbc, 0x0b, 0x05, 0xa9, 0x5d, 0x55, 0xa1, - 0x9a, 0x59, 0xbf, 0x2a, 0x40, 0xe9, 0x18, 0x37, 0x90, 0xc6, 0xd9, 0x06, 0xe8, 0xa5, 0x47, 0x1d, - 0xe9, 0xc1, 0xa4, 0x69, 0x1c, 0xc4, 0x84, 0x85, 0x3d, 0x75, 0x7c, 0xd4, 0x0d, 0xd6, 0x9d, 0x93, - 0xcf, 0xa1, 0x98, 0x8e, 0x9f, 0x46, 0xbc, 0x9a, 0x13, 0xe7, 0xef, 0x5e, 0xc6, 0xf9, 0xd3, 0x77, - 0x62, 0x6b, 0xa2, 0xea, 0xf4, 0x69, 0x14, 0x72, 0x13, 0x2e, 0x38, 0xbe, 0x1f, 0x9e, 0xa9, 0x92, - 0x12, 0xc5, 0x21, 0x92, 0xb3, 0x40, 0x87, 0x17, 0xb0, 0x55, 0x6a, 0xc4, 0x47, 0x71, 0xec, 0x74, - 0xf0, 0x34, 0x15, 0x04, 0xff, 0xa8, 0x25, 0xec, 0x5a, 0x7b, 0x5e, 0xe0, 0xf8, 0x55, 0x10, 0x3c, - 0x72, 0x82, 0xa7, 0x61, 0xf7, 0x75, 0x14, 0xc6, 0x9c, 0xc5, 0x8f, 0x38, 0x8f, 0xab, 0x45, 0x11, - 0xcc, 0x3e, 0x1a, 0xa9, 0x43, 0x69, 0xdb, 0x69, 0xbc, 0x60, 0x07, 0xa7, 0x48, 0x4c, 0x91, 0x55, - 0x56, 0x2f, 0x13, 0xec, 0x4f, 0x23, 0x1d, 0x52, 0xe9, 0x1a, 0x48, 0x03, 0xca, 0xa9, 0xeb, 0xb2, - 0x42, 0xab, 0x4b, 0x42, 0xe7, 0x83, 0x69, 0x43, 0x29, 0xa5, 0xa5, 0x89, 0x01, 0x95, 0x98, 0xc8, - 0x5d, 0x2c, 0x46, 0x87, 0xb3, 0x6a, 0x59, 0xf8, 0xdc, 0x9d, 0x93, 0x23, 0x28, 0x1f, 0x0b, 0x40, - 0x5e, 0x47, 0x18, 0xee, 0xb1, 0xa4, 0xba, 0x2c, 0x36, 0x70, 0x7d, 0x78, 0x03, 0x3a, 0x70, 0xb7, - 0x05, 0x7b, 0x87, 0x0e, 0x08, 0x9b, 0x0f, 0xa1, 0x32, 0x98, 0xdc, 0x69, 0x80, 0x91, 0xf9, 0x43, - 0xb8, 0x38, 0xc2, 0xa3, 0xb7, 0x6a, 0x3e, 0x7f, 0x32, 0xe0, 0xc2, 0x50, 0x1a, 0xf0, 0x02, 0x10, - 0x45, 0x2f, 0x55, 0x8a, 0x31, 0x39, 0x82, 0x39, 0x4c, 0x73, 0xa2, 0xa0, 0xc0, 0xe6, 0x34, 0x79, - 0xb5, 0x85, 0xa4, 0x8c, 0xbf, 0xd4, 0x62, 0xde, 0x03, 0xe8, 0x11, 0xa7, 0x82, 0x87, 0x5f, 0xc0, - 0x92, 0x4a, 0xb2, 0xea, 0x17, 0x15, 0x89, 0x2a, 0x94, 0x30, 0xa2, 0x86, 0xde, 0xdd, 0x94, 0x9b, - 0xf2, 0x6e, 0xb2, 0xbe, 0x82, 0x65, 
0xca, 0x1c, 0x77, 0xcf, 0xf3, 0xd9, 0xf9, 0x2d, 0x18, 0x8b, - 0xdf, 0xf3, 0x59, 0x1d, 0x91, 0x49, 0x5a, 0xfc, 0x6a, 0x4e, 0xee, 0xc3, 0x1c, 0x75, 0x82, 0x16, - 0x53, 0xa6, 0x3f, 0xc8, 0x30, 0x2d, 0x8c, 0x20, 0x2f, 0x95, 0x22, 0xd6, 0x03, 0x58, 0xec, 0xd2, - 0xb0, 0x75, 0x3d, 0x6d, 0x36, 0x13, 0x26, 0xdb, 0x60, 0x8e, 0xaa, 0x19, 0xd2, 0x0f, 0x59, 0xd0, - 0x52, 0xa6, 0x73, 0x54, 0xcd, 0xac, 0x55, 0x84, 0xf3, 0xe9, 0xce, 0x55, 0x68, 0x08, 0xe4, 0x77, - 0x10, 0xbe, 0x19, 0xa2, 0x5e, 0xc5, 0xd8, 0x72, 0xf1, 0x4e, 0x75, 0xdc, 0x1d, 0x2f, 0x3e, 0xdf, - 0xc1, 0x2a, 0xcc, 0xef, 0x78, 0xb1, 0xe6, 0x5f, 0x3a, 0x25, 0xab, 0x78, 0xdb, 0x36, 0xfc, 0xb6, - 0x8b, 0xde, 0x72, 0x16, 0x07, 0xea, 0x5a, 0x19, 0xa0, 0x5a, 0x9f, 0xc8, 0x38, 0x0a, 0x2b, 0x6a, - 0x33, 0x37, 0x61, 0x9e, 0x05, 0x3c, 0xc6, 0x32, 0x92, 0x57, 0x32, 0xb1, 0xe5, 0x03, 0xd9, 0x16, - 0x0f, 0x64, 0x71, 0xf5, 0xd3, 0x94, 0xc5, 0xda, 0x84, 0x65, 0x24, 0x64, 0x27, 0x82, 0x40, 0x5e, - 0xdb, 0xa4, 0x18, 0x5b, 0xf7, 0xa1, 0xd2, 0x13, 0x54, 0xa6, 0x57, 0x21, 0x8f, 0xd8, 0x54, 0xf5, - 0xf5, 0x51, 0x76, 0xc5, 0xba, 0x75, 0x0d, 0x96, 0xd3, 0xe2, 0x3f, 0xd7, 0xa8, 0x45, 0xa0, 0xd2, - 0x63, 0x52, 0xb0, 0x64, 0x09, 0x8a, 0x75, 0x2f, 0x48, 0x6f, 0x6d, 0xeb, 0x8d, 0x01, 0xa5, 0x7a, - 0x18, 0xf4, 0xee, 0xb4, 0x3a, 0x2c, 0xa7, 0xa5, 0xfb, 0xa8, 0x7e, 0xb0, 0xed, 0x44, 0x69, 0x0c, - 0x56, 0x86, 0xcf, 0x87, 0xfa, 0xc4, 0x60, 0x4b, 0xc6, 0xad, 0x3c, 0x5e, 0x7f, 0x74, 0x50, 0x9c, - 0x7c, 0x0a, 0xf3, 0x87, 0x87, 0x5b, 0x42, 0xd3, 0xec, 0x54, 0x9a, 0x52, 0x31, 0xf2, 0x10, 0xe6, - 0x9f, 0x8b, 0x2f, 0x1f, 0x89, 0xba, 0xa2, 0x46, 0x9c, 0x55, 0x19, 0x21, 0xc9, 0x46, 0x59, 0x23, - 0x8c, 0x5d, 0x9a, 0x0a, 0x59, 0xff, 0x36, 0xa0, 0xf8, 0xdc, 0xe9, 0x21, 0xc2, 0x1e, 0x04, 0x7d, - 0x8b, 0x7b, 0x5b, 0x41, 0xd0, 0x4b, 0x30, 0xe7, 0xb3, 0x57, 0xcc, 0x57, 0x67, 0x5c, 0x4e, 0x90, - 0x9a, 0xbc, 0x08, 0x63, 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x2e, 0xe3, 0x8e, 0xe7, 0x57, - 0xf3, 0x2b, 0x39, 0xbc, 0xe3, 0xe5, 0x0c, 0x33, 0xd7, 0x8e, 0x7d, 0xf5, 0x2e, 0xc0, 0x21, 0xb1, - 0x20, 0xef, 0x05, 0xcd, 0x50, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0x7d, 0x10, 0x34, 0x43, 0x2a, - 0xd6, 0xc8, 0x55, 0x28, 0xc4, 0x58, 0x7f, 0x49, 0x75, 0x5e, 0x04, 0x65, 0x11, 0xb9, 0x64, 0x95, - 0xaa, 0x05, 0xab, 0x0c, 0x25, 0xe9, 0xb7, 0x4a, 0xfe, 0x6f, 0x66, 0xe1, 0xe2, 0x13, 0x76, 0xb6, - 0x9d, 0xfa, 0x95, 0x06, 0x64, 0x05, 0x8a, 0x5d, 0xda, 0xc1, 0x8e, 0x3a, 0x42, 0x3a, 0x09, 0x8d, - 0x1d, 0x85, 0xed, 0x80, 0xa7, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x1d, 0xe6, 0x9f, - 0x30, 0x7e, 0x16, 0xc6, 0x2f, 0x85, 0xd7, 0xe5, 0x8d, 0x22, 0xf2, 0x3c, 0x61, 0x1c, 0x01, 0x1c, - 0x4d, 0xd7, 0x10, 0x15, 0x46, 0x29, 0x2a, 0xcc, 0x8f, 0x42, 0x85, 0xe9, 0x2a, 0xd9, 0x84, 0x62, - 0x23, 0x0c, 0x12, 0x1e, 0x3b, 0x1e, 0x1a, 0x9e, 0x13, 0xcc, 0xef, 0x20, 0xb3, 0x4c, 0xec, 0x76, - 0x6f, 0x91, 0xea, 0x9c, 0x64, 0x1d, 0x80, 0xbd, 0xe6, 0xb1, 0xb3, 0x1f, 0x26, 0xdd, 0x17, 0x14, - 0xa0, 0x1c, 0x12, 0x0e, 0xea, 0x54, 0x5b, 0xb5, 0xde, 0x85, 0x4b, 0xfd, 0x11, 0x51, 0xa1, 0x7a, - 0x00, 0xff, 0x47, 0x99, 0xcf, 0x9c, 0x84, 0x4d, 0x1f, 0x2d, 0xcb, 0x84, 0xea, 0xb0, 0xb0, 0x52, - 0xfc, 0x9f, 0x1c, 0x14, 0x77, 0x5f, 0xb3, 0xc6, 0x11, 0x4b, 0x12, 0xa7, 0x25, 0xb0, 0x69, 0x3d, - 0x0e, 0x1b, 0x2c, 0x49, 0xba, 0xba, 0x7a, 0x04, 0xf2, 0x7d, 0xc8, 0x1f, 0x04, 0x1e, 0x57, 0xf7, - 0xe3, 0x6a, 0xe6, 0xd3, 0xc0, 0xe3, 0x4a, 0xe7, 0xfe, 0x0c, 0x15, 0x52, 0xe4, 0x3e, 0xe4, 0xb1, - 0xbb, 0x4c, 0xd2, 0xe1, 0x5d, 0x4d, 0x16, 0x65, 0xc8, 0x96, 0xf8, 0x84, 0xe7, 0x7d, 0xc9, 0x54, - 0x96, 0xd6, 0xb2, 0xaf, 0x26, 0xef, 0x4b, 0xd6, 0xd3, 0xa0, 
0x24, 0xc9, 0x2e, 0x22, 0x6b, 0x27, - 0xe6, 0xcc, 0x55, 0xd9, 0xbb, 0x91, 0x05, 0x88, 0x24, 0x67, 0x4f, 0x4b, 0x2a, 0x8b, 0x41, 0xd8, - 0x7d, 0xed, 0x71, 0x55, 0x0d, 0x59, 0x41, 0x40, 0x36, 0xcd, 0x11, 0x9c, 0xa2, 0xf4, 0x4e, 0x18, - 0x30, 0x81, 0xed, 0xb3, 0xa5, 0x91, 0x4d, 0x93, 0xc6, 0x29, 0x86, 0xe1, 0xd8, 0x6b, 0x21, 0xce, - 0x5c, 0x18, 0x1b, 0x06, 0xc9, 0xa8, 0x85, 0x41, 0x12, 0xb6, 0xe6, 0x61, 0x4e, 0xc0, 0x20, 0xeb, - 0x77, 0x06, 0x14, 0xb5, 0x3c, 0x4d, 0x50, 0x77, 0xef, 0x41, 0x1e, 0x9f, 0xef, 0x2a, 0xff, 0x0b, - 0xa2, 0xea, 0x18, 0x77, 0xa8, 0xa0, 0x62, 0xe3, 0xd8, 0x73, 0x65, 0x53, 0x5c, 0xa2, 0x38, 0x44, - 0xca, 0x33, 0xde, 0x11, 0x29, 0x5b, 0xa0, 0x38, 0x24, 0x37, 0x61, 0xe1, 0x98, 0x35, 0xda, 0xb1, - 0xc7, 0x3b, 0x22, 0x09, 0xe5, 0x8d, 0x8a, 0x68, 0x27, 0x8a, 0x26, 0x8a, 0xb3, 0xcb, 0x61, 0x3d, - 0xc6, 0xc3, 0xd9, 0xdb, 0x20, 0x81, 0xfc, 0x36, 0xbe, 0xc8, 0x70, 0x67, 0x4b, 0x54, 0x8c, 0xf1, - 0x51, 0xbc, 0x3b, 0xee, 0x51, 0xbc, 0x9b, 0x3e, 0x8a, 0xfb, 0x93, 0x8a, 0xb7, 0x8f, 0x16, 0x64, - 0xeb, 0x11, 0x2c, 0x76, 0x0f, 0x1e, 0x29, 0xc3, 0xec, 0x9e, 0xab, 0x2c, 0xcd, 0xee, 0xb9, 0xe8, - 0xca, 0xee, 0xd3, 0x3d, 0x61, 0x65, 0x81, 0xe2, 0xb0, 0x0b, 0x12, 0x72, 0x1a, 0x48, 0xd8, 0xc4, - 0xe7, 0xbe, 0x76, 0xfa, 0x90, 0x89, 0x86, 0x67, 0x49, 0xba, 0x65, 0x1c, 0x4b, 0x37, 0xfc, 0x44, - 0xe8, 0x12, 0x6e, 0xf8, 0x89, 0x75, 0x0d, 0x96, 0xfa, 0xf2, 0x85, 0x4c, 0xe2, 0x7d, 0xa9, 0xb0, - 0x24, 0x8e, 0xd7, 0x19, 0x2c, 0x0f, 0x7c, 0x72, 0x22, 0xd7, 0xa1, 0x20, 0x3f, 0x6d, 0x54, 0x66, - 0xcc, 0xcb, 0x5f, 0x7f, 0xb3, 0xf2, 0xce, 0x00, 0x83, 0x5c, 0x44, 0xb6, 0xad, 0x76, 0xe0, 0xfa, - 0xac, 0x62, 0x8c, 0x64, 0x93, 0x8b, 0x66, 0xfe, 0xd7, 0x7f, 0xb8, 0x32, 0xb3, 0xee, 0xc0, 0x85, - 0xa1, 0xcf, 0x25, 0xe4, 0x1a, 0xe4, 0x8f, 0x99, 0xdf, 0x4c, 0xcd, 0x0c, 0x31, 0xe0, 0x22, 0xb9, - 0x0a, 0x39, 0xea, 0x9c, 0x55, 0x0c, 0xb3, 0xfa, 0xf5, 0x37, 0x2b, 0x97, 0x86, 0xbf, 0xb9, 0x38, - 0x67, 0xd2, 0xc4, 0xc6, 0x5f, 0x00, 0x16, 0x0f, 0x0f, 0xb7, 0xb6, 0x62, 0xcf, 0x6d, 0x31, 0xf2, - 0x4b, 0x03, 0xc8, 0xf0, 0xc3, 0x96, 0xdc, 0xc9, 0xae, 0xf1, 0xd1, 0x2f, 0x7f, 0xf3, 0xee, 0x94, - 0x52, 0x0a, 0x69, 0x7c, 0x0e, 0x73, 0x02, 0x1e, 0x93, 0x0f, 0x27, 0x7c, 0x25, 0x99, 0x6b, 0xe3, - 0x19, 0x95, 0xee, 0x06, 0x2c, 0xa4, 0x10, 0x93, 0xac, 0x67, 0x6e, 0xaf, 0x0f, 0x41, 0x9b, 0x1f, - 0x4d, 0xc4, 0xab, 0x8c, 0xfc, 0x14, 0xe6, 0x15, 0x72, 0x24, 0x37, 0xc6, 0xc8, 0xf5, 0x30, 0xac, - 0xb9, 0x3e, 0x09, 0x6b, 0xcf, 0x8d, 0x14, 0x21, 0x66, 0xba, 0x31, 0x80, 0x3f, 0x33, 0xdd, 0x18, - 0x82, 0x9c, 0x8d, 0xde, 0xbb, 0x32, 0xd3, 0xc8, 0x00, 0xde, 0xcc, 0x34, 0x32, 0x08, 0x3b, 0xc9, - 0x73, 0xc8, 0x23, 0xec, 0x24, 0x59, 0xed, 0x57, 0xc3, 0xa5, 0x66, 0xd6, 0x99, 0xe8, 0xc3, 0xab, - 0x3f, 0xc1, 0x6b, 0x4a, 0x7c, 0x42, 0xc8, 0xbe, 0xa0, 0xb4, 0x2f, 0x82, 0xe6, 0x8d, 0x09, 0x38, - 0x7b, 0xea, 0xd5, 0xf3, 0x7b, 0x6d, 0x82, 0xcf, 0x72, 0xe3, 0xd5, 0x0f, 0x7c, 0x00, 0x0c, 0xa1, - 0xa4, 0xa3, 0x0f, 0x62, 0x67, 0x88, 0x8e, 0x00, 0x6e, 0x66, 0x6d, 0x62, 0x7e, 0x65, 0xf0, 0x2b, - 0x7c, 0x7b, 0xf5, 0x23, 0x13, 0xb2, 0x91, 0x19, 0x8e, 0x91, 0x18, 0xc8, 0xbc, 0x3d, 0x95, 0x8c, - 0x32, 0xee, 0x48, 0xe4, 0xa3, 0xd0, 0x0d, 0xc9, 0xbe, 0xc8, 0xbb, 0x08, 0xc9, 0x9c, 0x90, 0x6f, - 0xcd, 0xb8, 0x65, 0xe0, 0x39, 0x43, 0xc4, 0x9b, 0xa9, 0x5b, 0x7b, 0x0a, 0x64, 0x9e, 0x33, 0x1d, - 0x3a, 0x6f, 0x95, 0xbe, 0x7d, 0x73, 0xc5, 0xf8, 0xfb, 0x9b, 0x2b, 0xc6, 0x3f, 0xdf, 0x5c, 0x31, - 0x4e, 0x0a, 0xe2, 0x7f, 0xce, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x91, 0xe5, 0xca, - 0x70, 0x1e, 0x00, 0x00, + 0xf5, 0xd7, 0x8a, 0x14, 0x45, 0x3e, 0xfe, 0x10, 0x3d, 0x71, 
0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, + 0x63, 0x45, 0x96, 0x1d, 0xd2, 0x5f, 0xd9, 0x86, 0x5c, 0xbb, 0x75, 0x62, 0xfd, 0x82, 0x14, 0x4b, + 0x36, 0x3b, 0x72, 0xe1, 0x22, 0x48, 0x81, 0xae, 0xb8, 0x43, 0x6a, 0xeb, 0xd5, 0xee, 0x76, 0x77, + 0x28, 0x59, 0xc9, 0xa9, 0x87, 0x02, 0x45, 0x8e, 0x3d, 0xf4, 0x96, 0x4b, 0x0b, 0xf4, 0xd4, 0x43, + 0xfb, 0x07, 0x34, 0xe7, 0x00, 0xed, 0xa1, 0xe7, 0x1e, 0x82, 0xc2, 0x7f, 0x44, 0x81, 0xde, 0x8a, + 0x37, 0x33, 0x4b, 0x0e, 0x7f, 0x68, 0x45, 0xd6, 0x27, 0xce, 0xbc, 0x79, 0x3f, 0xe6, 0xbd, 0x37, + 0xef, 0xcd, 0x67, 0x96, 0x50, 0xee, 0xd8, 0x9c, 0x9d, 0xda, 0x67, 0xf5, 0x30, 0x0a, 0x78, 0x40, + 0xae, 0x1c, 0x07, 0x87, 0x67, 0xf5, 0xc3, 0xae, 0xeb, 0x39, 0xaf, 0x5c, 0x5e, 0x3f, 0xf9, 0xff, + 0x7a, 0x3b, 0x0a, 0x7c, 0xce, 0x7c, 0xc7, 0xfc, 0xb8, 0xe3, 0xf2, 0xa3, 0xee, 0x61, 0xbd, 0x15, + 0x1c, 0x37, 0x3a, 0x41, 0x27, 0x68, 0x08, 0x89, 0xc3, 0x6e, 0x5b, 0xcc, 0xc4, 0x44, 0x8c, 0xa4, + 0x26, 0x73, 0x75, 0x98, 0xbd, 0x13, 0x04, 0x1d, 0x8f, 0xd9, 0xa1, 0x1b, 0xab, 0x61, 0x23, 0x0a, + 0x5b, 0x8d, 0x98, 0xdb, 0xbc, 0x1b, 0x2b, 0x99, 0xdb, 0x9a, 0x0c, 0x6e, 0xa4, 0x91, 0x6c, 0xa4, + 0x11, 0x07, 0xde, 0x09, 0x8b, 0x1a, 0xe1, 0x61, 0x23, 0x08, 0x13, 0xee, 0xc6, 0xb9, 0xdc, 0x76, + 0xe8, 0x36, 0xf8, 0x59, 0xc8, 0xe2, 0xc6, 0x69, 0x10, 0xbd, 0x62, 0x91, 0x12, 0xb8, 0x7b, 0xae, + 0x40, 0x97, 0xbb, 0x1e, 0x4a, 0xb5, 0xec, 0x30, 0x46, 0x23, 0xf8, 0xab, 0x84, 0x74, 0xb7, 0x79, + 0xe0, 0xbb, 0x31, 0x77, 0xdd, 0x8e, 0xdb, 0x68, 0xc7, 0x42, 0x46, 0x5a, 0x41, 0x27, 0x14, 0xfb, + 0xfd, 0x14, 0x17, 0xba, 0x51, 0x8b, 0x85, 0x81, 0xe7, 0xb6, 0xce, 0xd0, 0x86, 0x1c, 0x49, 0x31, + 0xeb, 0x6f, 0x59, 0xc8, 0x51, 0x16, 0x77, 0x3d, 0x4e, 0x96, 0xa0, 0x1c, 0xb1, 0xf6, 0x26, 0x0b, + 0x23, 0xd6, 0xb2, 0x39, 0x73, 0x6a, 0xc6, 0xa2, 0xb1, 0x5c, 0xd8, 0x99, 0xa1, 0x83, 0x64, 0xf2, + 0x13, 0xa8, 0x44, 0xac, 0x1d, 0x6b, 0x8c, 0xb3, 0x8b, 0xc6, 0x72, 0x71, 0xf5, 0x56, 0xfd, 0xdc, + 0x1c, 0xd6, 0x29, 0x6b, 0xef, 0xdb, 0x61, 0x5f, 0x64, 0x67, 0x86, 0x0e, 0x29, 0x21, 0xab, 0x90, + 0x89, 0x58, 0xbb, 0x96, 0x11, 0xba, 0xae, 0xa6, 0xeb, 0xda, 0x99, 0xa1, 0xc8, 0x4c, 0xd6, 0x20, + 0x8b, 0x5a, 0x6a, 0x59, 0x21, 0x74, 0xed, 0xc2, 0x0d, 0xec, 0xcc, 0x50, 0x21, 0x40, 0x9e, 0x42, + 0xfe, 0x98, 0x71, 0xdb, 0xb1, 0xb9, 0x5d, 0x83, 0xc5, 0xcc, 0x72, 0x71, 0xb5, 0x91, 0x2a, 0x8c, + 0x01, 0xaa, 0xef, 0x2b, 0x89, 0x2d, 0x9f, 0x47, 0x67, 0xb4, 0xa7, 0x80, 0xbc, 0x84, 0x92, 0xcd, + 0x39, 0xc3, 0x64, 0xb8, 0x81, 0x1f, 0xd7, 0x4a, 0x42, 0xe1, 0xdd, 0x8b, 0x15, 0x3e, 0xd1, 0xa4, + 0xa4, 0xd2, 0x01, 0x45, 0xe6, 0x23, 0x28, 0x0f, 0xd8, 0x24, 0x55, 0xc8, 0xbc, 0x62, 0x67, 0x32, + 0x31, 0x14, 0x87, 0xe4, 0x32, 0xcc, 0x9d, 0xd8, 0x5e, 0x97, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0x3c, + 0x9c, 0x7d, 0x60, 0x98, 0x47, 0x70, 0x69, 0x44, 0xff, 0x18, 0x05, 0x3f, 0xd2, 0x15, 0x14, 0x57, + 0x3f, 0x4a, 0xd9, 0xb5, 0xae, 0x4e, 0xb3, 0xb4, 0x9e, 0x87, 0x5c, 0x24, 0x1c, 0xb2, 0x7e, 0x67, + 0x40, 0x75, 0x38, 0xd5, 0x64, 0x57, 0x25, 0xc9, 0x10, 0x61, 0xb9, 0x3f, 0xc5, 0x29, 0x41, 0x82, + 0x0a, 0x8c, 0x50, 0x61, 0xae, 0x41, 0xa1, 0x47, 0xba, 0x28, 0x18, 0x05, 0x6d, 0x8b, 0xd6, 0x1a, + 0x64, 0x28, 0x6b, 0x93, 0x0a, 0xcc, 0xba, 0xea, 0x5c, 0xd3, 0x59, 0xd7, 0x21, 0x8b, 0x90, 0x71, + 0x58, 0x5b, 0xb9, 0x5e, 0xa9, 0x87, 0x87, 0xf5, 0x4d, 0xd6, 0x76, 0x7d, 0x17, 0x5d, 0xa4, 0xb8, + 0x64, 0xfd, 0xde, 0xc0, 0xfa, 0xc0, 0x6d, 0x91, 0x4f, 0x06, 0xfc, 0xb8, 0xf8, 0xb4, 0x8f, 0xec, + 0xfe, 0x65, 0xfa, 0xee, 0xef, 0x0d, 0x66, 0xe2, 0x82, 0x12, 0xd0, 0xbd, 0xfb, 0x29, 0x94, 0xf4, + 0xdc, 0x90, 0x1d, 0x28, 0x6a, 0xe7, 0x48, 0x6d, 0x78, 0x69, 0xb2, 0xcc, 0x52, 0x5d, 
0xd4, 0xfa, + 0x63, 0x06, 0x8a, 0xda, 0x22, 0x79, 0x0c, 0xd9, 0x57, 0xae, 0x2f, 0x43, 0x58, 0x59, 0x5d, 0x99, + 0x4c, 0xe5, 0x53, 0xd7, 0x77, 0xa8, 0x90, 0x23, 0x4d, 0xad, 0xee, 0x66, 0xc5, 0xb6, 0xee, 0x4d, + 0xa6, 0xe3, 0xdc, 0xe2, 0xbb, 0x33, 0x45, 0xdb, 0x90, 0x4d, 0x83, 0x40, 0x36, 0xb4, 0xf9, 0x91, + 0x68, 0x1a, 0x05, 0x2a, 0xc6, 0xe4, 0x0e, 0xbc, 0xe3, 0xfa, 0x2f, 0x02, 0x1e, 0x34, 0x23, 0xe6, + 0xb8, 0x78, 0xf8, 0x5e, 0x9c, 0x85, 0xac, 0x36, 0x27, 0x58, 0xc6, 0x2d, 0x91, 0x26, 0x54, 0x24, + 0xf9, 0xa0, 0x7b, 0xf8, 0x0b, 0xd6, 0xe2, 0x71, 0x2d, 0x27, 0xfc, 0x59, 0x4e, 0xd9, 0xc2, 0xae, + 0x2e, 0x40, 0x87, 0xe4, 0xdf, 0xaa, 0xda, 0xad, 0xbf, 0x18, 0x50, 0x1e, 0x50, 0x4f, 0x3e, 0x1d, + 0x48, 0xd5, 0xed, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0xc8, 0x39, 0x6e, 0x87, 0xc5, 0x5c, 0xa4, + 0xaa, 0xb0, 0xbe, 0xfa, 0xdd, 0xf7, 0x1f, 0xcc, 0xfc, 0xf3, 0xfb, 0x0f, 0x56, 0xb4, 0xab, 0x26, + 0x08, 0x99, 0xdf, 0x0a, 0x7c, 0x6e, 0xbb, 0x3e, 0x8b, 0xf0, 0x82, 0xfd, 0x58, 0x8a, 0xd4, 0x37, + 0xc5, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0xbe, 0x7d, 0xcc, 0x44, 0x9e, 0x0a, 0x54, 0x8c, 0x2d, 0x0e, + 0x65, 0xca, 0x78, 0x37, 0xf2, 0x29, 0xfb, 0x65, 0x17, 0x99, 0x7e, 0x90, 0x34, 0x12, 0xb1, 0xe9, + 0x8b, 0x1a, 0x3a, 0x32, 0x52, 0x25, 0x40, 0x96, 0x61, 0x8e, 0x45, 0x51, 0x10, 0xa9, 0xe2, 0x21, + 0x75, 0x79, 0xd5, 0xd7, 0xa3, 0xb0, 0x55, 0x3f, 0x10, 0x57, 0x3d, 0x95, 0x0c, 0x56, 0x15, 0x2a, + 0x89, 0xd5, 0x38, 0x0c, 0xfc, 0x98, 0x59, 0x0b, 0x18, 0xba, 0xb0, 0xcb, 0x63, 0xb5, 0x0f, 0xeb, + 0x5b, 0x03, 0x2a, 0x09, 0x45, 0xf2, 0x90, 0x2f, 0xa0, 0xd8, 0x6f, 0x0d, 0x49, 0x0f, 0x78, 0x98, + 0x1a, 0x54, 0x5d, 0x5e, 0xeb, 0x2b, 0xaa, 0x25, 0xe8, 0xea, 0xcc, 0x67, 0x50, 0x1d, 0x66, 0x18, + 0x93, 0xfd, 0x0f, 0x07, 0x1b, 0xc4, 0x70, 0xbf, 0xd2, 0x4e, 0xc3, 0xb7, 0xb3, 0x70, 0x85, 0x32, + 0x81, 0x5d, 0x76, 0x8f, 0xed, 0x0e, 0xdb, 0x08, 0xfc, 0xb6, 0xdb, 0x49, 0xc2, 0x5c, 0x15, 0xcd, + 0x30, 0xd1, 0x8c, 0x7d, 0x71, 0x19, 0xf2, 0x4d, 0xcf, 0xe6, 0xed, 0x20, 0x3a, 0x56, 0xca, 0x4b, + 0xa8, 0x3c, 0xa1, 0xd1, 0xde, 0x2a, 0x59, 0x84, 0xa2, 0x52, 0xbc, 0x1f, 0x38, 0x49, 0x3a, 0x75, + 0x12, 0xa9, 0xc1, 0xfc, 0x5e, 0xd0, 0x79, 0x86, 0xc9, 0x96, 0x15, 0x96, 0x4c, 0x89, 0x05, 0x25, + 0xc5, 0x18, 0xf5, 0xaa, 0x6b, 0x8e, 0x0e, 0xd0, 0xc8, 0xfb, 0x50, 0x38, 0x60, 0x71, 0xec, 0x06, + 0xfe, 0xee, 0x66, 0x2d, 0x27, 0xe4, 0xfb, 0x04, 0xd4, 0x7d, 0xc0, 0x83, 0x88, 0xed, 0x6e, 0xd6, + 0xe6, 0xa5, 0x6e, 0x35, 0x25, 0xfb, 0x50, 0x39, 0x10, 0x38, 0xa7, 0x89, 0xe8, 0xc6, 0x65, 0x71, + 0x2d, 0x2f, 0x52, 0x74, 0x63, 0x34, 0x45, 0x3a, 0x1e, 0xaa, 0x0b, 0xf6, 0x33, 0x3a, 0x24, 0x6c, + 0xfd, 0xd6, 0x00, 0x73, 0x5c, 0x00, 0xd5, 0x69, 0xf8, 0x0c, 0x72, 0xf2, 0x7c, 0xcb, 0x20, 0xfe, + 0x6f, 0x95, 0x21, 0x7f, 0xc9, 0x7b, 0x90, 0x93, 0xda, 0x55, 0x51, 0xab, 0x59, 0x92, 0xa5, 0x4c, + 0x2f, 0x4b, 0xd6, 0xaf, 0x73, 0x50, 0x3a, 0xc0, 0x2d, 0x25, 0x89, 0xac, 0x03, 0xf4, 0xf3, 0xaf, + 0x6a, 0x66, 0xf8, 0x54, 0x68, 0x1c, 0xc4, 0x84, 0xfc, 0xb6, 0x3a, 0x9f, 0xea, 0x8a, 0xec, 0xcd, + 0xc9, 0xe7, 0x50, 0x4c, 0xc6, 0xcf, 0x43, 0x5e, 0xcb, 0x88, 0xe8, 0x3d, 0x48, 0x39, 0xe0, 0xfa, + 0x4e, 0xea, 0x9a, 0xa8, 0x3a, 0xde, 0x1a, 0x85, 0xdc, 0x86, 0x4b, 0xb6, 0xe7, 0x05, 0xa7, 0xaa, + 0x66, 0x45, 0xf5, 0x89, 0xec, 0xe7, 0xe9, 0xe8, 0x02, 0xf6, 0x62, 0x8d, 0xf8, 0x24, 0x8a, 0xec, + 0x33, 0x0c, 0x44, 0x4e, 0xf0, 0x8f, 0x5b, 0xc2, 0xb6, 0xb8, 0xed, 0xfa, 0xb6, 0x57, 0x03, 0xc1, + 0x23, 0x27, 0x78, 0xdc, 0xb6, 0x5e, 0x87, 0x41, 0xc4, 0x59, 0xf4, 0x84, 0xf3, 0xa8, 0x56, 0x14, + 0xe1, 0x1d, 0xa0, 0x91, 0x26, 0x94, 0x36, 0xec, 0xd6, 0x11, 0xdb, 0x3d, 0x46, 0x62, 0x02, 0xdd, + 0xd2, 0x9a, 
0xa5, 0x60, 0x7f, 0x1e, 0xea, 0x98, 0x4d, 0xd7, 0x40, 0x5a, 0x50, 0x49, 0x5c, 0x97, + 0x2d, 0xa0, 0x56, 0x16, 0x3a, 0x1f, 0x4d, 0x1b, 0x4a, 0x29, 0x2d, 0x4d, 0x0c, 0xa9, 0xc4, 0x44, + 0x6e, 0x61, 0xb5, 0xdb, 0x9c, 0xd5, 0x2a, 0xc2, 0xe7, 0xde, 0x7c, 0x4c, 0x25, 0x2c, 0xbc, 0x45, + 0x25, 0x98, 0x8f, 0xa1, 0x3a, 0x9c, 0xdc, 0x69, 0x90, 0x97, 0xf9, 0x63, 0x78, 0x67, 0x8c, 0x47, + 0x6f, 0xd5, 0xdd, 0xfe, 0x6c, 0xc0, 0xa5, 0x91, 0x34, 0xe0, 0x0d, 0x23, 0xba, 0x8a, 0x54, 0x29, + 0xc6, 0x64, 0x1f, 0xe6, 0x30, 0xcd, 0xb1, 0xc2, 0x1a, 0x6b, 0xd3, 0xe4, 0xb5, 0x2e, 0x24, 0x65, + 0xfc, 0xa5, 0x16, 0xf3, 0x01, 0x40, 0x9f, 0x38, 0x15, 0xfe, 0xfc, 0x02, 0xca, 0x2a, 0xc9, 0xaa, + 0x83, 0x54, 0x25, 0x6c, 0x51, 0xc2, 0x08, 0x4b, 0xfa, 0x97, 0x5f, 0x66, 0xca, 0xcb, 0xcf, 0xfa, + 0x0a, 0x16, 0x28, 0xb3, 0x9d, 0x6d, 0xd7, 0x63, 0xe7, 0xf7, 0x78, 0x2c, 0x7e, 0xd7, 0x63, 0x4d, + 0x84, 0x3e, 0x49, 0xf1, 0xab, 0x39, 0x79, 0x08, 0x73, 0xd4, 0xf6, 0x3b, 0x4c, 0x99, 0xfe, 0x30, + 0xc5, 0xb4, 0x30, 0x82, 0xbc, 0x54, 0x8a, 0x58, 0x8f, 0xa0, 0xd0, 0xa3, 0x61, 0x33, 0x7b, 0xde, + 0x6e, 0xc7, 0x4c, 0x36, 0xc6, 0x0c, 0x55, 0x33, 0xa4, 0xef, 0x31, 0xbf, 0xa3, 0x4c, 0x67, 0xa8, + 0x9a, 0x59, 0x4b, 0xf8, 0x5e, 0x48, 0x76, 0xae, 0x42, 0x43, 0x20, 0xbb, 0x89, 0xf8, 0xd0, 0x10, + 0xf5, 0x2a, 0xc6, 0x96, 0x83, 0x97, 0xb6, 0xed, 0x6c, 0xba, 0xd1, 0xf9, 0x0e, 0xd6, 0x60, 0x7e, + 0xd3, 0x8d, 0x34, 0xff, 0x92, 0x29, 0x59, 0xc2, 0xeb, 0xbc, 0xe5, 0x75, 0x1d, 0xf4, 0x96, 0xb3, + 0xc8, 0x57, 0x5d, 0x75, 0x88, 0x6a, 0x7d, 0x22, 0xe3, 0x28, 0xac, 0xa8, 0xcd, 0xdc, 0x86, 0x79, + 0xe6, 0xf3, 0x08, 0xcb, 0x48, 0xde, 0xf9, 0xa4, 0x2e, 0x5f, 0xe0, 0x75, 0xf1, 0x02, 0x17, 0xd8, + 0x82, 0x26, 0x2c, 0xd6, 0x1a, 0x2c, 0x20, 0x21, 0x3d, 0x11, 0x04, 0xb2, 0xda, 0x26, 0xc5, 0xd8, + 0x7a, 0x08, 0xd5, 0xbe, 0xa0, 0x32, 0xbd, 0x04, 0x59, 0x04, 0xbf, 0xaa, 0xaf, 0x8f, 0xb3, 0x2b, + 0xd6, 0xad, 0xeb, 0xb0, 0x90, 0x14, 0xff, 0xb9, 0x46, 0x2d, 0x02, 0xd5, 0x3e, 0x93, 0xc2, 0x3d, + 0x65, 0x28, 0x36, 0x5d, 0x3f, 0x81, 0x05, 0xd6, 0x1b, 0x03, 0x4a, 0xcd, 0xc0, 0xef, 0xdf, 0x72, + 0x4d, 0x58, 0x48, 0x4a, 0xf7, 0x49, 0x73, 0x77, 0xc3, 0x0e, 0x93, 0x18, 0x2c, 0x8e, 0x9e, 0x0f, + 0xf5, 0x0d, 0xa3, 0x2e, 0x19, 0xd7, 0xb3, 0x78, 0x21, 0xd2, 0x61, 0x71, 0xf2, 0x29, 0xcc, 0xef, + 0xed, 0xad, 0x0b, 0x4d, 0xb3, 0x53, 0x69, 0x4a, 0xc4, 0xc8, 0x63, 0x98, 0x7f, 0x29, 0x3e, 0xad, + 0xc4, 0xea, 0x8a, 0x1a, 0x73, 0x56, 0x65, 0x84, 0x24, 0x1b, 0x65, 0xad, 0x20, 0x72, 0x68, 0x22, + 0x64, 0xfd, 0xdb, 0x80, 0xe2, 0x4b, 0xbb, 0x0f, 0x39, 0xfb, 0x18, 0xf7, 0x2d, 0x6e, 0x72, 0x85, + 0x71, 0x2f, 0xc3, 0x9c, 0xc7, 0x4e, 0x98, 0xa7, 0xce, 0xb8, 0x9c, 0x20, 0x35, 0x3e, 0x0a, 0x22, + 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x0e, 0xe3, 0xb6, 0xeb, 0xd5, 0xb2, 0x8b, 0x19, 0xbc, + 0xf5, 0xe5, 0x0c, 0x33, 0xd7, 0x8d, 0x3c, 0xf5, 0xf0, 0xc0, 0x21, 0xb1, 0x20, 0xeb, 0xfa, 0xed, + 0x40, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0xbd, 0xeb, 0xb7, 0x03, 0x2a, 0xd6, 0xc8, 0x35, 0xc8, + 0x45, 0x58, 0x7f, 0x71, 0x6d, 0x5e, 0x04, 0xa5, 0x80, 0x5c, 0xb2, 0x4a, 0xd5, 0x82, 0x55, 0x81, + 0x92, 0xf4, 0x5b, 0x25, 0xff, 0x4f, 0xb3, 0xf0, 0xce, 0x33, 0x76, 0xba, 0x91, 0xf8, 0x95, 0x04, + 0x64, 0x11, 0x8a, 0x3d, 0xda, 0xee, 0xa6, 0x3a, 0x42, 0x3a, 0x09, 0x8d, 0xed, 0x07, 0x5d, 0x9f, + 0x27, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x03, 0xe6, 0x9f, 0x31, 0x7e, 0x1a, 0x44, + 0xaf, 0x84, 0xd7, 0x95, 0xd5, 0x22, 0xf2, 0x3c, 0x63, 0x1c, 0x11, 0x22, 0x4d, 0xd6, 0x10, 0x76, + 0x86, 0x09, 0xec, 0xcc, 0x8e, 0x83, 0x9d, 0xc9, 0x2a, 0x59, 0x83, 0x62, 0x2b, 0xf0, 0x63, 0x1e, + 0xd9, 0x2e, 0x1a, 0x9e, 0x13, 0xcc, 
0xef, 0x22, 0xb3, 0x4c, 0xec, 0x46, 0x7f, 0x91, 0xea, 0x9c, + 0x64, 0x05, 0x80, 0xbd, 0xe6, 0x91, 0xbd, 0x13, 0xc4, 0xbd, 0x27, 0x1a, 0xa0, 0x1c, 0x12, 0x76, + 0x9b, 0x54, 0x5b, 0xc5, 0x0e, 0x79, 0x14, 0xc4, 0x5c, 0xbc, 0x53, 0x24, 0xbc, 0xec, 0xcd, 0xad, + 0xf7, 0xe0, 0xf2, 0x60, 0xb4, 0x54, 0x18, 0x1f, 0xc1, 0xff, 0x51, 0xe6, 0x31, 0x3b, 0x66, 0xd3, + 0x47, 0xd2, 0x32, 0xa1, 0x36, 0x2a, 0xac, 0x14, 0xff, 0x27, 0x03, 0xc5, 0xad, 0xd7, 0xac, 0xb5, + 0xcf, 0xe2, 0xd8, 0xee, 0x08, 0x60, 0xdc, 0x8c, 0x82, 0x16, 0x8b, 0xe3, 0x9e, 0xae, 0x3e, 0x81, + 0xfc, 0x10, 0xb2, 0xbb, 0xbe, 0xcb, 0xd5, 0xdd, 0xb9, 0x94, 0xfa, 0x2e, 0x71, 0xb9, 0xd2, 0xb9, + 0x33, 0x43, 0x85, 0x14, 0x79, 0x08, 0x59, 0xec, 0x3c, 0x93, 0x74, 0x7f, 0x47, 0x93, 0x45, 0x19, + 0xb2, 0x2e, 0xbe, 0x1f, 0xba, 0x5f, 0x32, 0x95, 0xc1, 0xe5, 0xf4, 0x6b, 0xcb, 0xfd, 0x92, 0xf5, + 0x35, 0x28, 0x49, 0xb2, 0x85, 0xb0, 0xde, 0x8e, 0x38, 0x73, 0x54, 0x66, 0x6f, 0xa6, 0x81, 0x25, + 0xc9, 0xd9, 0xd7, 0x92, 0xc8, 0x62, 0x10, 0xb6, 0x5e, 0xbb, 0x5c, 0x55, 0x4a, 0x5a, 0x10, 0x90, + 0x4d, 0x73, 0x04, 0xa7, 0x28, 0xbd, 0x19, 0xf8, 0x32, 0xf3, 0xe9, 0xd2, 0xc8, 0xa6, 0x49, 0xe3, + 0x14, 0xc3, 0x70, 0xe0, 0x76, 0x10, 0x83, 0xe6, 0x2f, 0x0c, 0x83, 0x64, 0xd4, 0xc2, 0x20, 0x09, + 0xeb, 0xf3, 0x30, 0x27, 0x20, 0x92, 0xf5, 0x77, 0x03, 0x8a, 0x5a, 0x9e, 0x26, 0xa8, 0xc9, 0xf7, + 0x21, 0xbb, 0xcf, 0xc4, 0x37, 0x15, 0x34, 0x9e, 0x17, 0x15, 0xc9, 0xb8, 0x4d, 0x05, 0x15, 0x9b, + 0xca, 0xb6, 0x23, 0x1b, 0x66, 0x99, 0xe2, 0x10, 0x29, 0x2f, 0xf8, 0x99, 0x48, 0x59, 0x9e, 0xe2, + 0x90, 0xdc, 0x86, 0xfc, 0x01, 0x6b, 0x75, 0x23, 0x97, 0x9f, 0x89, 0x24, 0x54, 0x56, 0xab, 0xa2, + 0xd5, 0x28, 0x9a, 0x28, 0xdc, 0x1e, 0x07, 0xb9, 0x05, 0x85, 0x98, 0xb5, 0x22, 0xc6, 0x99, 0x7f, + 0xa2, 0xaa, 0xaa, 0xac, 0xd8, 0x23, 0xc6, 0xb7, 0xfc, 0x13, 0xda, 0x5f, 0xb7, 0x9e, 0xe2, 0x49, + 0xee, 0x7b, 0x43, 0x20, 0xbb, 0x81, 0x6f, 0x47, 0x74, 0xa3, 0x4c, 0xc5, 0x18, 0x9f, 0xef, 0x5b, + 0x17, 0x3d, 0xdf, 0xb7, 0x92, 0xe7, 0xfb, 0xe0, 0x09, 0xc0, 0x6b, 0x4c, 0xcb, 0x88, 0xf5, 0x04, + 0x0a, 0xbd, 0x53, 0x4a, 0x2a, 0x30, 0xbb, 0xed, 0x28, 0x4b, 0xb3, 0xdb, 0x0e, 0xfa, 0xbd, 0xf5, + 0x7c, 0x5b, 0x58, 0xc9, 0x53, 0x1c, 0xf6, 0xd0, 0x46, 0x46, 0x43, 0x1b, 0x6b, 0x50, 0x1e, 0x38, + 0xaa, 0xc8, 0x44, 0x83, 0xd3, 0x38, 0xd9, 0x32, 0x8e, 0xa5, 0x1b, 0x5e, 0x2c, 0x74, 0x09, 0x37, + 0xbc, 0xd8, 0xba, 0x0e, 0xe5, 0x81, 0xe4, 0x22, 0x93, 0x78, 0x09, 0x2b, 0x50, 0x8a, 0xe3, 0x15, + 0x06, 0x0b, 0x43, 0x1f, 0xc7, 0xc8, 0x0d, 0xc8, 0xc9, 0x8f, 0x30, 0xd5, 0x19, 0xf3, 0xca, 0xd7, + 0xdf, 0x2c, 0xbe, 0x3b, 0xc4, 0x20, 0x17, 0x91, 0x6d, 0xbd, 0xeb, 0x3b, 0x1e, 0xab, 0x1a, 0x63, + 0xd9, 0xe4, 0xa2, 0x99, 0xfd, 0xcd, 0x1f, 0xae, 0xce, 0xac, 0xd8, 0x70, 0x69, 0xe4, 0xc3, 0x0e, + 0xb9, 0x0e, 0xd9, 0x03, 0xe6, 0xb5, 0x13, 0x33, 0x23, 0x0c, 0xb8, 0x48, 0xae, 0x41, 0x86, 0xda, + 0xa7, 0x55, 0xc3, 0xac, 0x7d, 0xfd, 0xcd, 0xe2, 0xe5, 0xd1, 0xaf, 0x43, 0xf6, 0xa9, 0x34, 0xb1, + 0xfa, 0x57, 0x80, 0xc2, 0xde, 0xde, 0xfa, 0x7a, 0xe4, 0x3a, 0x1d, 0x46, 0x7e, 0x65, 0x00, 0x19, + 0x7d, 0x33, 0x93, 0x7b, 0xe9, 0x0d, 0x61, 0xfc, 0x37, 0x0a, 0xf3, 0xfe, 0x94, 0x52, 0x0a, 0xb2, + 0x7c, 0x0e, 0x73, 0x02, 0x67, 0x93, 0x8f, 0x26, 0x7c, 0x6e, 0x99, 0xcb, 0x17, 0x33, 0x2a, 0xdd, + 0x2d, 0xc8, 0x27, 0x58, 0x95, 0xac, 0xa4, 0x6e, 0x6f, 0x00, 0x8a, 0x9b, 0xb7, 0x26, 0xe2, 0x55, + 0x46, 0x7e, 0x0e, 0xf3, 0x0a, 0x82, 0x92, 0x9b, 0x17, 0xc8, 0xf5, 0xc1, 0xb0, 0xb9, 0x32, 0x09, + 0x6b, 0xdf, 0x8d, 0x04, 0x6a, 0xa6, 0xba, 0x31, 0x04, 0x64, 0x53, 0xdd, 0x18, 0xc1, 0xae, 0xad, + 0xfe, 0x03, 0x35, 0xd5, 0xc8, 0x10, 0x70, 0x4d, 0x35, 0x32, 
0x8c, 0x5f, 0xc9, 0x4b, 0xc8, 0x22, + 0x7e, 0x25, 0x69, 0xbd, 0x5a, 0x03, 0xb8, 0x66, 0xda, 0x99, 0x18, 0x00, 0xbe, 0x3f, 0xc3, 0x3b, + 0x4d, 0x7c, 0x8b, 0x48, 0xbf, 0xcd, 0xb4, 0x6f, 0x97, 0xe6, 0xcd, 0x09, 0x38, 0xfb, 0xea, 0xd5, + 0x3b, 0x7e, 0x79, 0x82, 0x0f, 0x88, 0x17, 0xab, 0x1f, 0xfa, 0x54, 0x19, 0x40, 0x49, 0x87, 0x2a, + 0xa4, 0x9e, 0x22, 0x3a, 0x06, 0x01, 0x9a, 0x8d, 0x89, 0xf9, 0x95, 0xc1, 0xaf, 0xf0, 0x11, 0x37, + 0x08, 0x63, 0xc8, 0x6a, 0x6a, 0x38, 0xc6, 0x02, 0x26, 0xf3, 0xee, 0x54, 0x32, 0xca, 0xb8, 0x2d, + 0x61, 0x92, 0x82, 0x42, 0x24, 0xfd, 0xd6, 0xef, 0xc1, 0x29, 0x73, 0x42, 0xbe, 0x65, 0xe3, 0x8e, + 0x81, 0xe7, 0x0c, 0xa1, 0x73, 0xaa, 0x6e, 0xed, 0x4d, 0x91, 0x7a, 0xce, 0x74, 0x0c, 0xbe, 0x5e, + 0xfa, 0xee, 0xcd, 0x55, 0xe3, 0x1f, 0x6f, 0xae, 0x1a, 0xff, 0x7a, 0x73, 0xd5, 0x38, 0xcc, 0x89, + 0x7f, 0x64, 0xef, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x20, 0x47, 0x7d, 0x27, 0x1a, 0x1f, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -4014,6 +4049,20 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.SourcePolicies) > 0 { + for iNdEx := len(m.SourcePolicies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SourcePolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } if len(m.StoreID) > 0 { i -= len(m.StoreID) copy(dAtA[i:], m.StoreID) @@ -4093,6 +4142,13 @@ func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x1a + } if len(m.Config) > 0 { i -= len(m.Config) copy(dAtA[i:], m.Config) @@ -4964,6 +5020,13 @@ func (m *NewContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x3a + } if len(m.ExtraHosts) > 0 { for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { { @@ -5333,6 +5396,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Secretenv) > 0 { + for iNdEx := len(m.Secretenv) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secretenv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.Security != 0 { i = encodeVarintGateway(dAtA, i, uint64(m.Security)) i-- @@ -5958,6 +6035,12 @@ func (m *ResolveImageConfigRequest) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + if len(m.SourcePolicies) > 0 { + for _, e := range m.SourcePolicies { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5978,6 +6061,10 @@ func (m *ResolveImageConfigResponse) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6391,6 +6478,10 @@ func (m *NewContainerRequest) Size() (n int) { n += 1 + l + 
sovGateway(uint64(l)) } } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6567,6 +6658,12 @@ func (m *InitMessage) Size() (n int) { if m.Security != 0 { n += 1 + sovGateway(uint64(m.Security)) } + if len(m.Secretenv) > 0 { + for _, e := range m.Secretenv { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8800,6 +8897,40 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } m.StoreID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePolicies = append(m.SourcePolicies, &pb1.Policy{}) + if err := m.SourcePolicies[len(m.SourcePolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -8917,6 +9048,38 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { m.Config = []byte{} } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -11463,6 +11626,38 @@ func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -12210,6 +12405,40 @@ func (m *InitMessage) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secretenv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secretenv = append(m.Secretenv, &pb.SecretEnv{}) + if err := m.Secretenv[len(m.Secretenv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto index 2e55f1db86..c00d97391a 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -11,6 +11,7 @@ import "github.com/tonistiigi/fsutil/types/stat.proto"; import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; + option (gogoproto.sizer_all) = true; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -38,7 +39,7 @@ service LLBBridge { rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); // apicaps:CapGatewayWarnings - rpc Warn(WarnRequest) returns (WarnResponse); + rpc Warn(WarnRequest) returns (WarnResponse); } message Result { @@ -124,11 +125,13 @@ message ResolveImageConfigRequest { int32 ResolverType = 5; string SessionID = 6; string StoreID = 7; + repeated moby.buildkit.v1.sourcepolicy.Policy SourcePolicies = 8; } message ResolveImageConfigResponse { string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; bytes Config = 2; + string Ref = 3; } message SolveRequest { @@ -138,7 +141,7 @@ message SolveRequest { // 4 was removed in BuildKit v0.11.0. 
bool allowResultReturn = 5; bool allowResultArrayRef = 6; - + // apicaps.CapSolveInlineReturn deprecated bool Final = 10; bytes ExporterAttr = 11; @@ -165,7 +168,7 @@ message SolveResponse { string ref = 1; // can be used by readfile request // deprecated /* bytes ExporterAttr = 2;*/ - + // these fields are returned when allowMapReturn was set Result result = 3; } @@ -239,6 +242,7 @@ message NewContainerRequest { pb.Platform platform = 4; pb.WorkerConstraints constraints = 5; repeated pb.HostIP extraHosts = 6; + string hostname = 7; } message NewContainerResponse{} @@ -255,7 +259,7 @@ message ExecMessage { // InitMessage sent from client to server will start a new process in a // container InitMessage Init = 2; - // FdMessage used from client to server for input (stdin) and + // FdMessage used from client to server for input (stdin) and // from server to client for output (stdout, stderr) FdMessage File = 3; // ResizeMessage used from client to server for terminal resize events @@ -280,6 +284,7 @@ message InitMessage{ repeated uint32 Fds = 3; bool Tty = 4; pb.SecurityMode Security = 5; + repeated pb.SecretEnv secretenv = 6; } message ExitMessage { diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index e313542629..d299d7ad9e 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -4,8 +4,11 @@ import ( "context" "fmt" io "io" + "net/url" "os" + "strconv" "strings" + "unicode" "github.com/moby/buildkit/session" "github.com/pkg/errors" @@ -82,6 +85,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr } opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object + opts = decodeOpts(opts) dirName := "" name, ok := opts[keyDirName] @@ -209,6 +213,8 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { var stream grpc.ClientStream + opts = encodeOpts(opts) + ctx = metadata.NewOutgoingContext(ctx, opts) switch pr.name { @@ -280,7 +286,7 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) { } defer func() { err1 := wc.Close() - if err != nil { + if err == nil { err = err1 } }() @@ -337,3 +343,60 @@ func (e InvalidSessionError) Error() string { func (e InvalidSessionError) Unwrap() error { return e.err } + +func encodeOpts(opts map[string][]string) map[string][]string { + md := make(map[string][]string, len(opts)) + for k, v := range opts { + out, encoded := encodeStringForHeader(v) + md[k] = out + if encoded { + md[k+"-encoded"] = []string{"1"} + } + } + return md +} + +func decodeOpts(opts map[string][]string) map[string][]string { + md := make(map[string][]string, len(opts)) + for k, v := range opts { + out := make([]string, len(v)) + var isDecoded bool + if v, ok := opts[k+"-encoded"]; ok && len(v) > 0 { + if b, _ := strconv.ParseBool(v[0]); b { + isDecoded = true + } + } + if isDecoded { + for i, s := range v { + out[i], _ = url.QueryUnescape(s) + } + } else { + copy(out, v) + } + md[k] = out + } + return md +} + +// encodeStringForHeader encodes a string value so it can be used in grpc header. This encoding +// is backwards compatible and avoids encoding ASCII characters. 
+func encodeStringForHeader(inputs []string) ([]string, bool) { + var encode bool + for _, input := range inputs { + for _, runeVal := range input { + // Only encode non-ASCII characters, and characters that have special + // meaning during decoding. + if runeVal > unicode.MaxASCII { + encode = true + break + } + } + } + if !encode { + return inputs, false + } + for i, input := range inputs { + inputs[i] = url.QueryEscape(input) + } + return inputs, true +} diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go index 6fac82e0b0..bf8180722a 100644 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -134,7 +134,7 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) } } - bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed") + bklog.G(ctx).WithFields(logFields).Trace("healthcheck completed") } } } diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go index a4a065b46e..eac5f7614a 100644 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -24,10 +24,12 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre if err == io.EOF { // indicates client performed CloseSend, but they may still be // reading data - if conn, ok := conn.(interface { + if closeWriter, ok := conn.(interface { CloseWrite() error }); ok { - conn.CloseWrite() + closeWriter.CloseWrite() + } else { + conn.Close() } return nil } diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go index 3c730523a7..b4bb2f300b 100644 --- a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go +++ b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go @@ -5,64 +5,80 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/namespaces" + "github.com/containerd/nydus-snapshotter/pkg/errdefs" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func NewContentStore(store content.Store, ns string) content.Store { - return &nsContent{ns, store} +func NewContentStore(store content.Store, ns string) *Store { + return &Store{ns, store} } -type nsContent struct { +type Store struct { ns string content.Store } -func (c *nsContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { +func (c *Store) Namespace() string { + return c.ns +} + +func (c *Store) WithNamespace(ns string) *Store { + return NewContentStore(c.Store, ns) +} + +func (c *Store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Info(ctx, dgst) } -func (c *nsContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { +func (c *Store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Update(ctx, info, fieldpaths...) } -func (c *nsContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { +func (c *Store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Walk(ctx, fn, filters...) 
} -func (c *nsContent) Delete(ctx context.Context, dgst digest.Digest) error { +func (c *Store) Delete(ctx context.Context, dgst digest.Digest) error { return errors.Errorf("contentstore.Delete usage is forbidden") } -func (c *nsContent) Status(ctx context.Context, ref string) (content.Status, error) { +func (c *Store) Status(ctx context.Context, ref string) (content.Status, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Status(ctx, ref) } -func (c *nsContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { +func (c *Store) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.ListStatuses(ctx, filters...) } -func (c *nsContent) Abort(ctx context.Context, ref string) error { +func (c *Store) Abort(ctx context.Context, ref string) error { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Abort(ctx, ref) } -func (c *nsContent) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { +func (c *Store) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.ReaderAt(ctx, desc) } -func (c *nsContent) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { +func (c *Store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { return c.writer(ctx, 3, opts...) } -func (c *nsContent) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { +func (c *Store) WithFallbackNS(ns string) content.Store { + return &nsFallbackStore{ + main: c, + fb: c.WithNamespace(ns), + } +} + +func (c *Store) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { ctx = namespaces.WithNamespace(ctx, c.ns) w, err := c.Store.Writer(ctx, opts...) if err != nil { @@ -80,3 +96,58 @@ func (w *nsWriter) Commit(ctx context.Context, size int64, expected digest.Diges ctx = namespaces.WithNamespace(ctx, w.ns) return w.Writer.Commit(ctx, size, expected, opts...) } + +type nsFallbackStore struct { + main *Store + fb *Store +} + +var _ content.Store = &nsFallbackStore{} + +func (c *nsFallbackStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + info, err := c.main.Info(ctx, dgst) + if err != nil { + if errdefs.IsNotFound(err) { + return c.fb.Info(ctx, dgst) + } + } + return info, err +} + +func (c *nsFallbackStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + return c.main.Update(ctx, info, fieldpaths...) +} + +func (c *nsFallbackStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + return c.main.Walk(ctx, fn, filters...) +} + +func (c *nsFallbackStore) Delete(ctx context.Context, dgst digest.Digest) error { + return c.main.Delete(ctx, dgst) +} + +func (c *nsFallbackStore) Status(ctx context.Context, ref string) (content.Status, error) { + return c.main.Status(ctx, ref) +} + +func (c *nsFallbackStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + return c.main.ListStatuses(ctx, filters...) 
+} + +func (c *nsFallbackStore) Abort(ctx context.Context, ref string) error { + return c.main.Abort(ctx, ref) +} + +func (c *nsFallbackStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + ra, err := c.main.ReaderAt(ctx, desc) + if err != nil { + if errdefs.IsNotFound(err) { + return c.fb.ReaderAt(ctx, desc) + } + } + return ra, err +} + +func (c *nsFallbackStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return c.main.Writer(ctx, opts...) +} diff --git a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go index 136d7a6282..c4875000ea 100644 --- a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go +++ b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go @@ -167,8 +167,7 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie } mnt := mnts[0] - switch mnt.Type { - case "overlay": + if overlay.IsOverlayMountType(mnt) { for _, opt := range mnt.Options { if strings.HasPrefix(opt, "upperdir=") { a.root = strings.TrimPrefix(opt, "upperdir=") @@ -183,9 +182,9 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie return nil, errors.Errorf("could not find lowerdir in mount options %v", mnt.Options) } a.createWhiteoutDelete = true - case "bind", "rbind": + } else if mnt.Type == "bind" || mnt.Type == "rbind" { a.root = mnt.Source - default: + } else { mnter := LocalMounter(dest) root, err := mnter.Mount() if err != nil { @@ -570,10 +569,9 @@ func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) { } if len(upperMnts) == 1 { - switch upperMnts[0].Type { - case "bind", "rbind": + if upperMnts[0].Type == "bind" || upperMnts[0].Type == "rbind" { d.upperBindSource = upperMnts[0].Source - case "overlay": + } else if overlay.IsOverlayMountType(upperMnts[0]) { overlayDirs, err := overlay.GetOverlayLayers(upperMnts[0]) if err != nil { return nil, errors.Wrapf(err, "failed to get overlay layers from mount %+v", upperMnts[0]) diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go index df2e99b6c1..0e0a37fe67 100644 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go @@ -1,16 +1,20 @@ package snapshot import ( + "os" + + "github.com/Microsoft/go-winio/pkg/bindfilter" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/pkg/errors" + "golang.org/x/sys/windows" ) func (lm *localMounter) Mount() (string, error) { lm.mu.Lock() defer lm.mu.Unlock() - if lm.mounts == nil { + if lm.mounts == nil && lm.mountable != nil { mounts, release, err := lm.mountable.Mount() if err != nil { return "", err @@ -26,27 +30,30 @@ func (lm *localMounter) Mount() (string, error) { } m := lm.mounts[0] + dir, err := os.MkdirTemp("", "buildkit-mount") + if err != nil { + return "", errors.Wrap(err, "failed to create temp dir") + } if m.Type == "bind" || m.Type == "rbind" { - ro := false - for _, opt := range m.Options { - if opt == "ro" { - ro = true - break - } - } - if !ro { + if !m.ReadOnly() { + // This is a rw bind mount, we can simply return the source. + // NOTE(gabriel-samfira): This is safe to do if the source of the bind mount is a DOS path + // of a local folder. 
If it's a \\?\Volume{} (for any reason that I can't think of now) + // we should allow bindfilter.ApplyFileBinding() to mount it. return m.Source, nil } + // The Windows snapshotter does not have any notion of bind mounts. We emulate + // bind mounts here using the bind filter. + if err := bindfilter.ApplyFileBinding(dir, m.Source, m.ReadOnly()); err != nil { + return "", errors.Wrapf(err, "failed to mount %v: %+v", m, err) + } + } else { + if err := m.Mount(dir); err != nil { + return "", errors.Wrapf(err, "failed to mount %v: %+v", m, err) + } } - // Windows mounts always activate in-place, so the target of the mount must be the source directory. - // See https://github.com/containerd/containerd/pull/2366 - dir := m.Source - - if err := m.Mount(dir); err != nil { - return "", errors.Wrapf(err, "failed to mount in-place: %v", m) - } lm.target = dir return lm.target, nil } @@ -55,10 +62,34 @@ func (lm *localMounter) Unmount() error { lm.mu.Lock() defer lm.mu.Unlock() + // NOTE(gabriel-samfira): Should we just return nil if len(lm.mounts) == 0? + // Calling Mount() would fail on an instance of the localMounter where mounts contains + // anything other than 1 mount. + if len(lm.mounts) != 1 { + return errors.Wrapf(errdefs.ErrNotImplemented, "request to mount %d layers, only 1 is supported", len(lm.mounts)) + } + m := lm.mounts[0] + if lm.target != "" { - if err := mount.Unmount(lm.target, 0); err != nil { - return err + if m.Type == "bind" || m.Type == "rbind" { + if err := bindfilter.RemoveFileBinding(lm.target); err != nil { + // The following two errors denote that lm.target is not a mount point. + if !errors.Is(err, windows.ERROR_INVALID_PARAMETER) && !errors.Is(err, windows.ERROR_NOT_FOUND) { + return errors.Wrapf(err, "failed to unmount %v: %+v", lm.target, err) + } + } + } else { + // The containerd snapshotter uses the bind filter internally to mount windows-layer + // volumes. We use same bind filter here to emulate bind mounts. In theory we could + // simply call mount.Unmount() here, without the extra check for bind mounts and explicit + // call to bindfilter.RemoveFileBinding() (above), but this would operate under the + // assumption that the internal implementation in containerd will always be based on the + // bind filter, which feels brittle. + if err := mount.Unmount(lm.target, 0); err != nil { + return errors.Wrapf(err, "failed to unmount %v: %+v", lm.target, err) + } } + os.RemoveAll(lm.target) lm.target = "" } diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go index edf95cee70..f5c59f1735 100644 --- a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go +++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go @@ -167,6 +167,8 @@ func setRedirectDir(mounts []mount.Mount, redirectDirOption string) (ret []mount return mounts } for _, m := range mounts { + // Replace redirect_dir options, but only for overlay. + // redirect_dir is not supported by fuse-overlayfs. 
if m.Type == "overlay" { var opts []string for _, o := range m.Options { diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go index f8bfbd23dd..5f5d9f33e2 100644 --- a/vendor/github.com/moby/buildkit/solver/cachemanager.go +++ b/vendor/github.com/moby/buildkit/solver/cachemanager.go @@ -25,7 +25,7 @@ func NewCacheManager(ctx context.Context, id string, storage CacheKeyStorage, re results: results, } - if err := cm.ReleaseUnreferenced(); err != nil { + if err := cm.ReleaseUnreferenced(ctx); err != nil { bklog.G(ctx).Errorf("failed to release unreferenced cache metadata: %+v", err) } @@ -40,10 +40,10 @@ type cacheManager struct { results CacheResultStorage } -func (c *cacheManager) ReleaseUnreferenced() error { +func (c *cacheManager) ReleaseUnreferenced(ctx context.Context) error { return c.backend.Walk(func(id string) error { return c.backend.WalkResults(id, func(cr CacheResult) error { - if !c.results.Exists(cr.ID) { + if !c.results.Exists(ctx, cr.ID) { c.backend.Release(cr.ID) } return nil @@ -112,10 +112,10 @@ func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst dige return keys, nil } -func (c *cacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { +func (c *cacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { outs := make([]*CacheRecord, 0) if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error { - if c.results.Exists(r.ID) { + if c.results.Exists(ctx, r.ID) { outs = append(outs, &CacheRecord{ ID: r.ID, cacheManager: c, @@ -217,6 +217,11 @@ func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([ r.Release(context.TODO()) } } + for _, r := range m { + // refs added to results are deleted from m by filterResults + // so release any leftovers + r.Release(context.TODO()) + } return results, nil } diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go index 77724ac4c4..7f426fbedf 100644 --- a/vendor/github.com/moby/buildkit/solver/cachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -49,5 +49,5 @@ type CacheResultStorage interface { Save(Result, time.Time) (CacheResult, error) Load(ctx context.Context, res CacheResult) (Result, error) LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group) ([]*Remote, error) - Exists(id string) bool + Exists(ctx context.Context, id string) bool } diff --git a/vendor/github.com/moby/buildkit/solver/combinedcache.go b/vendor/github.com/moby/buildkit/solver/combinedcache.go index 89361bcc04..ffffbb3953 100644 --- a/vendor/github.com/moby/buildkit/solver/combinedcache.go +++ b/vendor/github.com/moby/buildkit/solver/combinedcache.go @@ -100,7 +100,7 @@ func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Tim return cm.main.Save(key, s, createdAt) } -func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { +func (cm *combinedCacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { if len(ck.ids) == 0 { return nil, errors.Errorf("no results") } @@ -112,7 +112,7 @@ func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { for c := range ck.ids { func(c *cacheManager) { eg.Go(func() error { - recs, err := c.Records(ck) + recs, err := c.Records(ctx, ck) if err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/solver/edge.go 
b/vendor/github.com/moby/buildkit/solver/edge.go index 5e3068010f..3e4ec18242 100644 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -405,7 +405,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { } else { for _, k := range keys { k.vtx = e.edge.Vertex.Digest() - records, err := e.op.Cache().Records(k) + records, err := e.op.Cache().Records(context.Background(), k) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue @@ -583,7 +583,7 @@ func (e *edge) recalcCurrentState() { } } - records, err := e.op.Cache().Records(mergedKey) + records, err := e.op.Cache().Records(context.Background(), mergedKey) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go index e8af9ff233..aed3045bf1 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go @@ -3,7 +3,7 @@ package errdefs import ( fmt "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go index 3cbf8097ee..d7b9e7799a 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/golang/protobuf/jsonpb" //nolint:staticcheck "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/grpcerrors" diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go index b30eab3f66..8527f2a791 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go @@ -3,7 +3,7 @@ package errdefs import ( fmt "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go index 4ec375165d..5c2e03d133 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go @@ -1,7 +1,7 @@ package errdefs import ( - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" digest "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go index 27e1534861..ec203257e3 100644 --- a/vendor/github.com/moby/buildkit/solver/jobs.go +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -342,6 +342,13 @@ func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex // if same vertex is already loaded without cache just use that st, ok := jl.actives[dgstWithoutCache] + if ok { + // When matching an existing active vertext by dgstWithoutCache, set v to the + // existing active vertex, as otherwise the original vertex will use an + // incorrect digest and can incorrectly delete it while it is still in use. 
+ v = st.vtx + } + if !ok { st, ok = jl.actives[dgst] @@ -533,6 +540,8 @@ func (wp *withProvenance) WalkProvenance(ctx context.Context, f func(ProvenanceP if wp.j == nil { return nil } + wp.j.list.mu.RLock() + defer wp.j.list.mu.RUnlock() m := map[digest.Digest]struct{}{} return wp.j.walkProvenance(ctx, wp.e, f, m) } @@ -652,9 +661,11 @@ type execRes struct { } type sharedOp struct { - resolver ResolveOpFunc - st *state - g flightcontrol.Group + resolver ResolveOpFunc + st *state + gDigest flightcontrol.Group[digest.Digest] + gCacheRes flightcontrol.Group[[]*CacheMap] + gExecRes flightcontrol.Group[*execRes] opOnce sync.Once op Op @@ -679,7 +690,18 @@ func (s *sharedOp) IgnoreCache() bool { } func (s *sharedOp) Cache() CacheManager { - return s.st.combinedCacheManager() + return &cacheWithCacheOpts{s.st.combinedCacheManager(), s.st} +} + +type cacheWithCacheOpts struct { + CacheManager + st *state +} + +func (c cacheWithCacheOpts) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { + // Allow Records accessing to cache opts through ctx. This enable to use remote provider + // during checking the cache existence. + return c.CacheManager.Records(withAncestorCacheOpts(ctx, c.st), ck) } func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) { @@ -705,7 +727,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF err = errdefs.WrapVertex(err, s.st.origDigest) }() flightControlKey := fmt.Sprintf("slow-compute-%d", index) - key, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (interface{}, error) { + key, err := s.gDigest.Do(ctx, flightControlKey, func(ctx context.Context) (digest.Digest, error) { s.slowMu.Lock() // TODO: add helpers for these stored values if res, ok := s.slowCacheRes[index]; ok { @@ -714,7 +736,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF } if err := s.slowCacheErr[index]; err != nil { s.slowMu.Unlock() - return nil, err + return "", err } s.slowMu.Unlock() @@ -722,7 +744,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF if p != nil { st := s.st.solver.getState(s.st.vtx.Inputs()[index]) if st == nil { - return nil, errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name()) + return "", errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name()) } ctx2 := progress.WithProgress(ctx, st.mpw) if st.mspan.Span != nil { @@ -773,7 +795,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF notifyCompleted(err, false) return "", err } - return key.(digest.Digest), nil + return key, nil } func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, err error) { @@ -786,7 +808,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, return nil, err } flightControlKey := fmt.Sprintf("cachemap-%d", index) - res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { + res, err := s.gCacheRes.Do(ctx, flightControlKey, func(ctx context.Context) (ret []*CacheMap, retErr error) { if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) { return s.cacheRes, nil } @@ -842,11 +864,11 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, return nil, err } - if len(res.([]*CacheMap)) <= index { + if len(res) <= index { return s.CacheMap(ctx, index) } - return &cacheMapResp{CacheMap: res.([]*CacheMap)[index], complete: s.cacheDone}, nil + 
return &cacheMapResp{CacheMap: res[index], complete: s.cacheDone}, nil } func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) { @@ -859,7 +881,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, return nil, nil, err } flightControlKey := "exec" - res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { + res, err := s.gExecRes.Do(ctx, flightControlKey, func(ctx context.Context) (ret *execRes, retErr error) { if s.execDone { if s.execErr != nil { return nil, s.execErr @@ -921,8 +943,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, if res == nil || err != nil { return nil, nil, err } - r := res.(*execRes) - return unwrapShared(r.execRes), r.execExporters, nil + return unwrapShared(res.execRes), res.execExporters, nil } func (s *sharedOp) getOp() (Op, error) { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go index 185fe81f06..27dc133620 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -155,7 +155,7 @@ type resultProxy struct { id string b *provenanceBridge req frontend.SolveRequest - g flightcontrol.Group + g flightcontrol.Group[solver.CachedResult] mu sync.Mutex released bool v solver.CachedResult @@ -177,6 +177,9 @@ func (rp *resultProxy) Definition() *pb.Definition { } func (rp *resultProxy) Provenance() interface{} { + if rp.provenance == nil { + return nil + } return rp.provenance } @@ -241,7 +244,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err defer func() { err = rp.wrapError(err) }() - r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) { + return rp.g.Do(ctx, "result", func(ctx context.Context) (solver.CachedResult, error) { rp.mu.Lock() if rp.released { rp.mu.Unlock() @@ -270,30 +273,27 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err rp.mu.Unlock() return nil, errors.Errorf("evaluating released result") } - rp.v = v - rp.err = err if err == nil { - capture, err := captureProvenance(ctx, v) - if err != nil && rp.err != nil { - rp.err = errors.Wrapf(rp.err, "failed to capture provenance: %v", err) + var capture *provenance.Capture + capture, err = captureProvenance(ctx, v) + if err != nil { + err = errors.Errorf("failed to capture provenance: %v", err) v.Release(context.TODO()) - rp.v = nil + v = nil } rp.provenance = capture } + rp.v = v + rp.err = err rp.mu.Unlock() return v, err }) - if r != nil { - return r.(solver.CachedResult), nil - } - return nil, err } -func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { +func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (resolvedRef string, dgst digest.Digest, config []byte, err error) { w, err := b.resolveWorker() if err != nil { - return "", nil, err + return "", "", nil, err } if opt.LogName == "" { opt.LogName = fmt.Sprintf("resolve image config for %s", ref) @@ -304,11 +304,18 @@ func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb. 
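
// Editorial sketch (not part of the patch): the jobs.go and bridge.go hunks above replace the
// single untyped flightcontrol.Group with one typed Group[T] per result kind, which is why the
// res.(digest.Digest) / res.([]*CacheMap) / res.(*execRes) assertions disappear. A minimal usage
// sketch of the generic API as it appears in these hunks, assuming the
// github.com/moby/buildkit/util/flightcontrol import path used elsewhere in the vendored tree:
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/util/flightcontrol"
)

func main() {
	var g flightcontrol.Group[string] // zero value is usable, as with the sharedOp fields above

	// Concurrent Do calls with the same key share one execution and all callers
	// receive the same typed result, with no interface{} round-trip.
	v, err := g.Do(context.TODO(), "example-key", func(ctx context.Context) (string, error) {
		return "computed once per key", nil
	})
	fmt.Println(v, err)
}
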
} else { id += platforms.Format(*platform) } + pol, err := loadSourcePolicy(b.builder) + if err != nil { + return "", "", nil, err + } + if pol != nil { + opt.SourcePolicies = append(opt.SourcePolicies, pol) + } err = inBuilderContext(ctx, b.builder, opt.LogName, id, func(ctx context.Context, g session.Group) error { - dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, b.sm, g) + resolvedRef, dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, b.sm, g) return err }) - return dgst, config, err + return resolvedRef, dgst, config, err } type lazyCacheManager struct { @@ -329,12 +336,12 @@ func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex } return lcm.main.Query(inp, inputIndex, dgst, outputIndex) } -func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) { +func (lcm *lazyCacheManager) Records(ctx context.Context, ck *solver.CacheKey) ([]*solver.CacheRecord, error) { lcm.wait() if lcm.main == nil { return nil, nil } - return lcm.main.Records(ck) + return lcm.main.Records(ctx, ck) } func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) { if err := lcm.wait(); err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go index 974c2e04e8..6212066cd9 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go @@ -5,6 +5,7 @@ import ( "log" "os" "path/filepath" + "runtime" "strings" "time" @@ -13,6 +14,7 @@ import ( "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/system" "github.com/pkg/errors" copy "github.com/tonistiigi/fsutil/copy" ) @@ -66,7 +68,7 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho } func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error { - p, err := fs.RootPath(d, filepath.Join("/", action.Path)) + p, err := fs.RootPath(d, action.Path) if err != nil { return err } @@ -126,7 +128,10 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *cop func rm(ctx context.Context, d string, action pb.FileActionRm) error { if action.AllowWildcard { - src := cleanPath(action.Path) + src, err := cleanPath(action.Path) + if err != nil { + return errors.Wrap(err, "cleaning path") + } m, err := copy.ResolveWildcards(d, src, false) if err != nil { return err @@ -167,9 +172,14 @@ func rmPath(root, src string, allowNotFound bool) error { } func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.User, idmap *idtools.IdentityMapping) error { - srcPath := cleanPath(action.Src) - destPath := cleanPath(action.Dest) - + srcPath, err := cleanPath(action.Src) + if err != nil { + return errors.Wrap(err, "cleaning source path") + } + destPath, err := cleanPath(action.Dest) + if err != nil { + return errors.Wrap(err, "cleaning path") + } if !action.CreateDestPath { p, err := fs.RootPath(dest, filepath.Join("/", action.Dest)) if err != nil { @@ -244,19 +254,6 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u * return nil } -func cleanPath(s string) string { - s2 := filepath.Join("/", s) - if strings.HasSuffix(s, "/.") { - if s2 != "/" { - s2 += "/" - } - s2 += "." 
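
// Editorial sketch (not part of the patch): the bridge.go hunk above widens ResolveImageConfig to
// also return the ref it actually resolved, which can differ from the input once source policies
// rewrite it. The interface and types below are hypothetical stand-ins that only mirror the shape
// of the new return values, to show how a caller consumes the extra value:
package main

import (
	"context"
	"fmt"
)

// imageResolver is a stand-in for the real frontend bridge.
type imageResolver interface {
	ResolveImageConfig(ctx context.Context, ref string) (resolvedRef string, dgst string, config []byte, err error)
}

func pin(ctx context.Context, r imageResolver, ref string) (string, error) {
	resolvedRef, dgst, _, err := r.ResolveImageConfig(ctx, ref)
	if err != nil {
		return "", err
	}
	// Record the ref the resolver actually used (possibly rewritten by a source
	// policy) together with its digest.
	return fmt.Sprintf("%s@%s", resolvedRef, dgst), nil
}

type staticResolver struct{}

func (staticResolver) ResolveImageConfig(ctx context.Context, ref string) (string, string, []byte, error) {
	return "docker.io/library/alpine:latest", "sha256:deadbeef", nil, nil
}

func main() {
	pinned, _ := pin(context.Background(), staticResolver{}, "alpine")
	fmt.Println(pinned)
}
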
- } else if strings.HasSuffix(s, "/") && s2 != "/" { - s2 += "/" - } - return s2 -} - type Backend struct { } @@ -349,3 +346,21 @@ func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mou return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping()) } + +func cleanPath(s string) (string, error) { + s, err := system.CheckSystemDriveAndRemoveDriveLetter(s, runtime.GOOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + s = filepath.FromSlash(s) + s2 := filepath.Join("/", s) + if strings.HasSuffix(s, string(filepath.Separator)+".") { + if s2 != string(filepath.Separator) { + s2 += string(filepath.Separator) + } + s2 += "." + } else if strings.HasSuffix(s, string(filepath.Separator)) && s2 != string(filepath.Separator) { + s2 += string(filepath.Separator) + } + return s2, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go index 09aa19855e..ac0a5dd652 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go @@ -4,9 +4,12 @@ import ( "bufio" "context" "encoding/binary" + "encoding/json" "io" "os" "sort" + "strconv" + "strings" "sync" "time" @@ -16,6 +19,8 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/identity" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/util/leaseutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -25,23 +30,27 @@ import ( const ( recordsBucket = "_records" + versionBucket = "_version" ) type HistoryQueueOpt struct { DB *bolt.DB - LeaseManager leases.Manager - ContentStore content.Store + LeaseManager *leaseutil.Manager + ContentStore *containerdsnapshot.Store CleanConfig *config.HistoryConfig } type HistoryQueue struct { - mu sync.Mutex - initOnce sync.Once - HistoryQueueOpt - ps *pubsub[*controlapi.BuildHistoryEvent] - active map[string]*controlapi.BuildHistoryRecord - refs map[string]int - deleted map[string]struct{} + // mu protects active, refs and deleted maps + mu sync.Mutex + initOnce sync.Once + opt HistoryQueueOpt + ps *pubsub[*controlapi.BuildHistoryEvent] + active map[string]*controlapi.BuildHistoryRecord + refs map[string]int + deleted map[string]struct{} + hContentStore *containerdsnapshot.Store + hLeaseManager *leaseutil.Manager } type StatusImportResult struct { @@ -51,15 +60,15 @@ type StatusImportResult struct { NumTotalSteps int } -func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { +func NewHistoryQueue(opt HistoryQueueOpt) (*HistoryQueue, error) { if opt.CleanConfig == nil { opt.CleanConfig = &config.HistoryConfig{ - MaxAge: int64((48 * time.Hour).Seconds()), + MaxAge: config.Duration{Duration: 48 * time.Hour}, MaxEntries: 50, } } h := &HistoryQueue{ - HistoryQueueOpt: opt, + opt: opt, ps: &pubsub[*controlapi.BuildHistoryEvent]{ m: map[*channel[*controlapi.BuildHistoryEvent]]struct{}{}, }, @@ -68,6 +77,39 @@ func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { deleted: map[string]struct{}{}, } + ns := h.opt.ContentStore.Namespace() + // double check invalid configuration + ns2 := h.opt.LeaseManager.Namespace() + if ns != ns2 { + return nil, errors.Errorf("invalid configuration: content store namespace %q does not match lease manager namespace %q", ns, ns2) + } + h.hContentStore 
= h.opt.ContentStore.WithNamespace(ns + "_history") + h.hLeaseManager = h.opt.LeaseManager.WithNamespace(ns + "_history") + + // v2 migration: all records need to be on isolated containerd ns from rest of buildkit + needsMigration := false + if err := h.opt.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(versionBucket)) + if b != nil { + v := b.Get([]byte("version")) + if v != nil { + vi, err := strconv.ParseInt(string(v), 10, 64) + if err == nil && vi > 1 { + return nil + } + } + } + needsMigration = true + return nil + }); err != nil { + return nil, err + } + if needsMigration { + if err := h.migrateV2(); err != nil { + return nil, err + } + } + go func() { for { h.gc() @@ -75,13 +117,158 @@ func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { } }() - return h + return h, nil +} + +func (h *HistoryQueue) migrateV2() error { + ctx := context.Background() + + if err := h.opt.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + ctx, release, err := leaseutil.WithLease(ctx, h.hLeaseManager, leases.WithID("history_migration_"+identity.NewID()), leaseutil.MakeTemporary) + if err != nil { + return err + } + defer release(ctx) + return b.ForEach(func(key, dt []byte) error { + recs, err := h.opt.LeaseManager.ListResources(ctx, leases.Lease{ID: h.leaseID(string(key))}) + if err != nil { + if errdefs.IsNotFound(err) { + return nil + } + return err + } + recs2 := make([]leases.Resource, 0, len(recs)) + for _, r := range recs { + if r.Type == "content" { + if ok, err := h.migrateBlobV2(ctx, r.ID, false); err != nil { + return err + } else if ok { + recs2 = append(recs2, r) + } + } else { + return errors.Errorf("unknown resource type %q", r.Type) + } + } + + l, err := h.hLeaseManager.Create(ctx, leases.WithID(h.leaseID(string(key)))) + if err != nil { + if !errors.Is(err, errdefs.ErrAlreadyExists) { + return err + } + l = leases.Lease{ID: string(key)} + } + + for _, r := range recs2 { + if err := h.hLeaseManager.AddResource(ctx, l, r); err != nil { + return err + } + } + + return h.opt.LeaseManager.Delete(ctx, leases.Lease{ID: h.leaseID(string(key))}) + }) + }); err != nil { + return err + } + + if err := h.opt.DB.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(versionBucket)) + if err != nil { + return err + } + return b.Put([]byte("version"), []byte("2")) + }); err != nil { + return err + } + + return nil +} + +func (h *HistoryQueue) blobRefs(ctx context.Context, dgst digest.Digest, detectSkipLayer bool) ([]digest.Digest, error) { + info, err := h.opt.ContentStore.Info(ctx, dgst) + if err != nil { + return nil, err // allow missing blobs + } + var out []digest.Digest + layers := map[digest.Digest]struct{}{} + if detectSkipLayer { + dt, err := content.ReadBlob(ctx, h.opt.ContentStore, ocispecs.Descriptor{ + Digest: dgst, + }) + if err != nil { + return nil, err + } + var mfst ocispecs.Manifest + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + for _, l := range mfst.Layers { + layers[l.Digest] = struct{}{} + } + } + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/gc.ref.content.") { + continue + } + dgst, err := digest.Parse(v) + if err != nil { + continue + } + if _, ok := layers[dgst]; ok { + continue + } + out = append(out, dgst) + } + return out, nil +} + +func (h *HistoryQueue) migrateBlobV2(ctx context.Context, id string, detectSkipLayers bool) (bool, error) { + dgst, err := digest.Parse(id) + if err != nil { + return false, err + } + + refs, _ 
:= h.blobRefs(ctx, dgst, detectSkipLayers) // allow missing blobs + labels := map[string]string{} + for i, r := range refs { + labels["containerd.io/gc.ref.content."+strconv.Itoa(i)] = r.String() + } + + w, err := content.OpenWriter(ctx, h.hContentStore, content.WithDescriptor(ocispecs.Descriptor{ + Digest: dgst, + }), content.WithRef("history-migrate-"+id)) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return true, nil + } + return false, err + } + defer w.Close() + ra, err := h.opt.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: dgst, + }) + if err != nil { + return false, nil // allow skipping + } + defer ra.Close() + if err := content.Copy(ctx, w, &reader{ReaderAt: ra}, 0, dgst, content.WithLabels(labels)); err != nil { + return false, err + } + + for _, refs := range refs { + h.migrateBlobV2(ctx, refs.String(), detectSkipLayers) // allow missing blobs + } + + return true, nil } func (h *HistoryQueue) gc() error { var records []*controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx *bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -102,7 +289,7 @@ func (h *HistoryQueue) gc() error { } // in order for record to get deleted by gc it exceed both maxentries and maxage criteria - if len(records) < int(h.CleanConfig.MaxEntries) { + if len(records) < int(h.opt.CleanConfig.MaxEntries) { return nil } @@ -115,8 +302,8 @@ func (h *HistoryQueue) gc() error { defer h.mu.Unlock() now := time.Now() - for _, r := range records[h.CleanConfig.MaxEntries:] { - if now.Add(time.Duration(h.CleanConfig.MaxAge) * -time.Second).After(*r.CompletedAt) { + for _, r := range records[h.opt.CleanConfig.MaxEntries:] { + if now.Add(-h.opt.CleanConfig.MaxAge.Duration).After(*r.CompletedAt) { if err := h.delete(r.Ref, false); err != nil { return err } @@ -132,7 +319,11 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { return nil } delete(h.deleted, ref) - if err := h.DB.Update(func(tx *bolt.Tx) error { + h.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_DELETED, + Record: &controlapi.BuildHistoryRecord{Ref: ref}, + }) + if err := h.opt.DB.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -142,7 +333,7 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { if sync { opts = append(opts, leases.SynchronousDelete) } - err2 := h.LeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) + err2 := h.hLeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) 
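
// Editorial sketch (not part of the patch): the history-queue hunks above gate the v2 migration on
// a "_version" bucket stored in bolt. Below is a minimal, self-contained version of that
// check-then-migrate pattern, assuming the go.etcd.io/bbolt module that the surrounding code
// aliases as bolt; the file name and migrate body are placeholders:
package main

import (
	"strconv"

	bolt "go.etcd.io/bbolt"
)

const versionBucket = "_version"

func migrateIfNeeded(db *bolt.DB, migrate func() error) error {
	needs := false
	if err := db.View(func(tx *bolt.Tx) error {
		if b := tx.Bucket([]byte(versionBucket)); b != nil {
			if v := b.Get([]byte("version")); v != nil {
				if vi, err := strconv.ParseInt(string(v), 10, 64); err == nil && vi > 1 {
					return nil // already on schema v2 or later
				}
			}
		}
		needs = true
		return nil
	}); err != nil {
		return err
	}
	if !needs {
		return nil
	}
	if err := migrate(); err != nil {
		return err
	}
	// Record the new schema version only after the migration succeeded.
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(versionBucket))
		if err != nil {
			return err
		}
		return b.Put([]byte("version"), []byte("2"))
	})
}

func main() {
	db, err := bolt.Open("history.db", 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := migrateIfNeeded(db, func() error { return nil }); err != nil {
		panic(err)
	}
}
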
if err1 != nil { return err1 } @@ -156,7 +347,7 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { func (h *HistoryQueue) init() error { var err error h.initOnce.Do(func() { - err = h.DB.Update(func(tx *bolt.Tx) error { + err = h.opt.DB.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists([]byte(recordsBucket)) return err }) @@ -168,11 +359,27 @@ func (h *HistoryQueue) leaseID(id string) string { return "ref_" + id } -func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor) error { +func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor, detectSkipLayers bool) error { if desc == nil { return nil } - return h.LeaseManager.AddResource(ctx, l, leases.Resource{ + if _, err := h.hContentStore.Info(ctx, desc.Digest); err != nil { + if errdefs.IsNotFound(err) { + ctx, release, err := leaseutil.WithLease(ctx, h.hLeaseManager, leases.WithID("history_migration_"+identity.NewID()), leaseutil.MakeTemporary) + if err != nil { + return err + } + defer release(ctx) + ok, err := h.migrateBlobV2(ctx, string(desc.Digest), detectSkipLayers) + if err != nil { + return err + } + if !ok { + return errors.Errorf("unknown blob %s in history", desc.Digest) + } + } + } + return h.hLeaseManager.AddResource(ctx, l, leases.Resource{ ID: string(desc.Digest), Type: "content", }) @@ -183,7 +390,7 @@ func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *co defer h.mu.Unlock() var br controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx *bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -223,7 +430,7 @@ func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *co func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client.SolveStatus) error { h.init() var br controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx *bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -245,7 +452,7 @@ func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client return nil } - ra, err := h.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + ra, err := h.hContentStore.ReaderAt(ctx, ocispecs.Descriptor{ Digest: br.Logs.Digest, Size: br.Logs.Size_, MediaType: br.Logs.MediaType, @@ -286,7 +493,7 @@ func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client } func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRecord) error { - return h.DB.Update(func(tx *bolt.Tx) (err error) { + return h.opt.DB.Update(func(tx *bolt.Tx) (err error) { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -296,7 +503,7 @@ func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRe return err } - l, err := h.LeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) + l, err := h.hLeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) created := true if err != nil { if !errors.Is(err, errdefs.ErrAlreadyExists) { @@ -308,32 +515,32 @@ func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRe defer func() { if err != nil && created { - h.LeaseManager.Delete(ctx, l) + h.hLeaseManager.Delete(ctx, l) } }() - if err := h.addResource(ctx, l, rec.Logs); err != nil { + if err := h.addResource(ctx, l, rec.Logs, false); err != nil { return err } - if err := h.addResource(ctx, 
l, rec.Trace); err != nil { + if err := h.addResource(ctx, l, rec.Trace, false); err != nil { return err } if rec.Result != nil { - if err := h.addResource(ctx, l, rec.Result.Result); err != nil { + if err := h.addResource(ctx, l, rec.Result.Result, true); err != nil { return err } for _, att := range rec.Result.Attestations { - if err := h.addResource(ctx, l, att); err != nil { + if err := h.addResource(ctx, l, att, false); err != nil { return err } } } for _, r := range rec.Results { - if err := h.addResource(ctx, l, r.Result); err != nil { + if err := h.addResource(ctx, l, r.Result, true); err != nil { return err } for _, att := range r.Attestations { - if err := h.addResource(ctx, l, att); err != nil { + if err := h.addResource(ctx, l, att, false); err != nil { return err } } @@ -371,27 +578,27 @@ func (h *HistoryQueue) Delete(ctx context.Context, ref string) error { } func (h *HistoryQueue) OpenBlobWriter(ctx context.Context, mt string) (_ *Writer, err error) { - l, err := h.LeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + l, err := h.hLeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { return nil, err } defer func() { if err != nil { - h.LeaseManager.Delete(ctx, l) + h.hLeaseManager.Delete(ctx, l) } }() ctx = leases.WithLease(ctx, l.ID) - w, err := content.OpenWriter(ctx, h.ContentStore, content.WithRef("history-"+h.leaseID(l.ID))) + w, err := content.OpenWriter(ctx, h.hContentStore, content.WithRef("history-"+h.leaseID(l.ID))) if err != nil { return nil, err } return &Writer{ mt: mt, - lm: h.LeaseManager, + lm: h.hLeaseManager, l: l, w: w, dgstr: digest.Canonical.Digester(), @@ -424,7 +631,7 @@ func (w *Writer) Discard() { func (w *Writer) Commit(ctx context.Context) (*ocispecs.Descriptor, func(), error) { dgst := w.dgstr.Digest() sz := int64(w.sz) - if err := w.w.Commit(ctx, int64(w.sz), dgst); err != nil { + if err := w.w.Commit(leases.WithLease(ctx, w.l.ID), int64(w.sz), dgst); err != nil { if !errdefs.IsAlreadyExists(err) { w.Discard() return nil, nil, err @@ -559,7 +766,10 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR if req.Ref != "" && e.Ref != req.Ref { continue } - sub.ps.Send(&controlapi.BuildHistoryEvent{ + if _, ok := h.deleted[e.Ref]; ok { + continue + } + sub.send(&controlapi.BuildHistoryEvent{ Type: controlapi.BuildHistoryEventType_STARTED, Record: e, }) @@ -568,7 +778,8 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR h.mu.Unlock() if !req.ActiveOnly { - if err := h.DB.View(func(tx *bolt.Tx) error { + events := []*controlapi.BuildHistoryEvent{} + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -581,17 +792,31 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR if err := br.Unmarshal(dt); err != nil { return errors.Wrapf(err, "failed to unmarshal build record %s", key) } - if err := f(&controlapi.BuildHistoryEvent{ + events = append(events, &controlapi.BuildHistoryEvent{ Record: &br, Type: controlapi.BuildHistoryEventType_COMPLETE, - }); err != nil { - return err - } + }) return nil }) }); err != nil { return err } + // filter out records that have been marked for deletion + h.mu.Lock() + for i, e := range events { + if _, ok := h.deleted[e.Record.Ref]; ok { + events[i] = nil + } + } + h.mu.Unlock() + for _, e := range events { + if e.Record == nil { + continue 
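
// Editorial sketch (not part of the patch): the Listen hunk above buffers the stored records
// first, then drops the ones already marked in the deleted map before invoking the callback, so
// the callback never runs inside the bolt transaction or while the queue mutex is held. The same
// pattern in isolation, with plain strings standing in for the history records:
package main

import (
	"fmt"
	"sync"
)

type queue struct {
	mu      sync.Mutex
	deleted map[string]struct{}
}

func (q *queue) replay(stored []string, f func(ref string) error) error {
	// Snapshot the deletion marks while holding the lock...
	q.mu.Lock()
	keep := make([]string, 0, len(stored))
	for _, ref := range stored {
		if _, ok := q.deleted[ref]; !ok {
			keep = append(keep, ref)
		}
	}
	q.mu.Unlock()
	// ...and only then hand the surviving records to the callback.
	for _, ref := range keep {
		if err := f(ref); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	q := &queue{deleted: map[string]struct{}{"build-2": {}}}
	_ = q.replay([]string{"build-1", "build-2", "build-3"}, func(ref string) error {
		fmt.Println("replaying", ref)
		return nil
	})
}
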
+ } + if err := f(e); err != nil { + return err + } + } } if req.EarlyExit { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go index 2cfeaae7a2..b61e7e3d1c 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go @@ -45,12 +45,16 @@ type MountManager struct { } func (mm *MountManager) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt, s session.Group) (mref cache.MutableRef, err error) { + name := fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName) + if id != m.Dest { + name += fmt.Sprintf(" with id %q", id) + } g := &cacheRefGetter{ locker: &mm.cacheMountsMu, cacheMounts: mm.cacheMounts, cm: mm.cm, globalCacheRefs: sharedCacheRefs, - name: fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName), + name: name, session: s, } return g.getRefCacheDir(ctx, ref, id, sharing) @@ -75,19 +79,19 @@ func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.Immutable defer mu.Unlock() if ref, ok := g.cacheMounts[key]; ok { - return ref.clone(), nil + return ref.clone(ctx), nil } defer func() { if err == nil { share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} g.cacheMounts[key] = share - mref = share.clone() + mref = share.clone(ctx) } }() switch sharing { case pb.CacheSharingOpt_SHARED: - return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) { + return g.globalCacheRefs.get(ctx, key, func() (cache.MutableRef, error) { return g.getRefCacheDirNoCache(ctx, key, ref, id, false) }) case pb.CacheSharingOpt_PRIVATE: @@ -101,7 +105,12 @@ func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.Immutable func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) { makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { - return g.cm.New(ctx, ref, g.session, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + newRef, err := g.cm.New(ctx, ref, g.session, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + if err != nil { + return nil, err + } + bklog.G(ctx).Debugf("created new ref for cache dir %q: %s", id, newRef.ID()) + return newRef, nil } cacheRefsLocker.Lock(key) @@ -114,10 +123,12 @@ func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, locked := false for _, si := range sis { if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { - bklog.G(ctx).Debugf("reusing ref for cache dir: %s", mRef.ID()) + bklog.G(ctx).Debugf("reusing ref for cache dir %q: %s", id, mRef.ID()) return mRef, nil } else if errors.Is(err, cache.ErrLocked) { locked = true + } else { + bklog.G(ctx).WithError(err).Errorf("failed to get reuse ref for cache dir %q: %s", id, si.ID()) } } if block && locked { @@ -438,7 +449,7 @@ func CacheMountsLocker() sync.Locker { return &sharedCacheRefs.mu } -func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { +func (r *cacheRefs) get(ctx context.Context, key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { r.mu.Lock() defer r.mu.Unlock() @@ -448,7 +459,7 @@ func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache. 
share, ok := r.shares[key] if ok { - return share.clone(), nil + return share.clone(ctx), nil } mref, err := fn() @@ -458,7 +469,7 @@ func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache. share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} r.shares[key] = share - return share.clone(), nil + return share.clone(ctx), nil } type cacheRefShare struct { @@ -469,7 +480,11 @@ type cacheRefShare struct { key string } -func (r *cacheRefShare) clone() cache.MutableRef { +func (r *cacheRefShare) clone(ctx context.Context) cache.MutableRef { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": bklog.LazyStackTrace{}, + }).Trace("cloning cache mount ref share") cacheRef := &cacheRef{cacheRefShare: r} if cacheRefCloneHijack != nil { cacheRefCloneHijack() @@ -481,6 +496,10 @@ func (r *cacheRefShare) clone() cache.MutableRef { } func (r *cacheRefShare) release(ctx context.Context) error { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": bklog.LazyStackTrace{}, + }).Trace("releasing cache mount ref share main") if r.main != nil { delete(r.main.shares, r.key) } @@ -495,6 +514,10 @@ type cacheRef struct { } func (r *cacheRef) Release(ctx context.Context) error { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": bklog.LazyStackTrace{}, + }).Trace("releasing cache mount ref share") if r.main != nil { r.main.mu.Lock() defer r.main.mu.Unlock() diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 2bee1283b4..eee0dd39fb 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -7,13 +7,15 @@ import ( "fmt" "os" "path" + "runtime" "sort" "strings" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/frontend/gateway" + resourcestypes "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/frontend/gateway/container" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" @@ -43,6 +45,8 @@ type ExecOp struct { platform *pb.Platform numInputs int parallelism *semaphore.Weighted + rec resourcestypes.Recorder + digest digest.Digest } var _ solver.Op = &ExecOp{} @@ -62,9 +66,14 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. 
w: w, platform: platform, parallelism: parallelism, + digest: v.Digest(), }, nil } +func (e *ExecOp) Digest() digest.Digest { + return e.digest +} + func (e *ExecOp) Proto() *pb.ExecOp { return e.op } @@ -252,10 +261,14 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } } - p, err := gateway.PrepareMounts(ctx, e.mm, e.cm, g, e.op.Meta.Cwd, e.op.Mounts, refs, func(m *pb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) { + platformOS := runtime.GOOS + if e.platform != nil { + platformOS = e.platform.OS + } + p, err := container.PrepareMounts(ctx, e.mm, e.cm, g, e.op.Meta.Cwd, e.op.Mounts, refs, func(m *pb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) { desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")) return e.cm.New(ctx, ref, g, cache.WithDescription(desc)) - }) + }, platformOS) defer func() { if err != nil { execInputs := make([]solver.Result, len(e.op.Mounts)) @@ -299,7 +312,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return nil, err } - extraHosts, err := gateway.ParseExtraHosts(e.op.Meta.ExtraHosts) + extraHosts, err := container.ParseExtraHosts(e.op.Meta.ExtraHosts) if err != nil { return nil, err } @@ -357,7 +370,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } }() - execErr := e.exec.Run(ctx, "", p.Root, p.Mounts, executor.ProcessInfo{ + rec, execErr := e.exec.Run(ctx, "", p.Root, p.Mounts, executor.ProcessInfo{ Meta: meta, Stdin: nil, Stdout: stdout, @@ -377,6 +390,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu // Prevent the result from being released. p.OutputRefs[i].Ref = nil } + e.rec = rec return results, errors.Wrapf(execErr, "process %q did not complete successfully", strings.Join(e.op.Meta.Args, " ")) } @@ -446,3 +460,10 @@ func (e *ExecOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, func (e *ExecOp) IsProvenanceProvider() { } + +func (e *ExecOp) Samples() (*resourcestypes.Samples, error) { + if e.rec == nil { + return nil, nil + } + return e.rec.Samples() +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go index 4f80ddfb65..db81201f1a 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -296,7 +296,7 @@ type FileOpSolver struct { mu sync.Mutex outs map[int]int ins map[int]input - g flightcontrol.Group + g flightcontrol.Group[input] } type input struct { @@ -405,7 +405,7 @@ func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb } func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, g session.Group) (input, error) { - inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) { + return s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ input, err error) { s.mu.Lock() inp := s.ins[idx] s.mu.Unlock() @@ -547,17 +547,17 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp eg.Go(loadInput(ctx)) eg.Go(loadSecondaryInput(ctx)) if err := eg.Wait(); err != nil { - return nil, err + return input{}, err } } else { if action.Input != -1 { if err := loadInput(ctx)(); err != nil { - return nil, err + return input{}, err } } if action.SecondaryInput != -1 { if err := loadSecondaryInput(ctx)(); err != nil { - 
return nil, err + return input{}, err } } } @@ -565,7 +565,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp if inpMount == nil { m, err := s.r.Prepare(ctx, nil, false, g) if err != nil { - return nil, err + return input{}, err } inpMount = m } @@ -574,46 +574,46 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp case *pb.FileAction_Mkdir: user, group, err := loadOwner(ctx, a.Mkdir.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Mkfile: user, group, err := loadOwner(ctx, a.Mkfile.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Rm: if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Copy: if inpMountSecondary == nil { m, err := s.r.Prepare(ctx, nil, true, g) if err != nil { - return nil, err + return input{}, err } inpMountSecondary = m } user, group, err := loadOwner(ctx, a.Copy.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil { - return nil, err + return input{}, err } default: - return nil, errors.Errorf("invalid action type %T", action.Action) + return input{}, errors.Errorf("invalid action type %T", action.Action) } if inp.requiresCommit { ref, err := s.r.Commit(ctx, inpMount) if err != nil { - return nil, err + return input{}, err } inp.ref = ref } else { @@ -624,10 +624,6 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp s.mu.Unlock() return inp, nil }) - if err != nil { - return input{}, err - } - return inp.(input), err } func isDefaultIndexes(idxs [][]int) bool { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go index 1af3af1960..ee29cceb05 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go @@ -6,6 +6,7 @@ import ( "strconv" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage/exptypes" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/solver" @@ -15,7 +16,7 @@ import ( ) func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { - return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, usage *resources.SysSampler) (*llbsolver.Result, error) { ps, err := exptypes.ParsePlatforms(res.Metadata) if err != nil { return nil, err @@ -41,7 +42,7 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { return nil, errors.Errorf("could not find ref %s", p.ID) } - pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j) + pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j, usage) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go index 
0a99163a3a..20cdc71dae 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go @@ -4,6 +4,7 @@ import ( "context" "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/attestations/sbom" @@ -13,8 +14,8 @@ import ( "github.com/pkg/errors" ) -func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { - return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { +func SBOMProcessor(scannerRef string, useCache bool, resolveMode string) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, usage *resources.SysSampler) (*llbsolver.Result, error) { // skip sbom generation if we already have an sbom if sbom.HasSBOM(res.Result) { return res, nil @@ -25,7 +26,9 @@ func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { return nil, err } - scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef) + scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef, llb.ResolveImageConfigOpt{ + ResolveMode: resolveMode, + }) if err != nil { return nil, err } @@ -56,13 +59,13 @@ func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { if err != nil { return nil, err } - attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (solver.ResultProxy, error) { + attSolve, err := result.ConvertAttestation(&att, func(st *llb.State) (solver.ResultProxy, error) { def, err := st.Marshal(ctx) if err != nil { return nil, err } - r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ // TODO: buildinfo + r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ Definition: def.ToPB(), }, j.SessionID) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go index b30581c852..9138d6d9f8 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go @@ -12,6 +12,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" @@ -130,18 +131,19 @@ func (b *provenanceBridge) findByResult(rp solver.ResultProxy) (*resultWithBridg return nil, false } -func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { - dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) +func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (resolvedRef string, dgst digest.Digest, config []byte, err error) { + ref, dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) if err != nil { - return "", nil, err + return "", "", nil, err } b.images = append(b.images, provenance.ImageSource{ Ref: ref, Platform: opt.Platform, Digest: dgst, + Local: opt.ResolverType == llb.ResolverTypeOCILayout, }) - return dgst, config, nil + return ref, dgst, config, nil } func (b *provenanceBridge) Solve(ctx 
context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { @@ -322,10 +324,11 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc if err != nil { return errors.Wrapf(err, "failed to parse OCI digest %s", pin) } - c.AddLocalImage(provenance.ImageSource{ + c.AddImage(provenance.ImageSource{ Ref: s.Reference.String(), Platform: s.Platform, Digest: dgst, + Local: true, }) default: return errors.Errorf("unknown source identifier %T", id) @@ -355,6 +358,13 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc if pr.Network != pb.NetMode_NONE { c.NetworkAccess = true } + samples, err := op.Samples() + if err != nil { + return err + } + if samples != nil { + c.AddSamples(op.Digest(), samples) + } case *ops.BuildOp: c.IncompleteMaterials = true // not supported yet } @@ -369,10 +379,11 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc type ProvenanceCreator struct { pr *provenance.ProvenancePredicate j *solver.Job + sampler *resources.SysSampler addLayers func() error } -func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job) (*ProvenanceCreator, error) { +func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job, usage *resources.SysSampler) (*ProvenanceCreator, error) { var reproducible bool if v, ok := attrs["reproducible"]; ok { b, err := strconv.ParseBool(v) @@ -394,6 +405,12 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve } } + withUsage := false + if v, ok := attrs["capture-usage"]; ok { + b, err := strconv.ParseBool(v) + withUsage = err == nil && b + } + pr, err := provenance.NewPredicate(cp) if err != nil { return nil, err @@ -423,7 +440,7 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve pr.Invocation.Parameters.Secrets = nil pr.Invocation.Parameters.SSH = nil case "max": - dgsts, err := provenance.AddBuildConfig(ctx, pr, res) + dgsts, err := AddBuildConfig(ctx, pr, cp, res, withUsage) if err != nil { return nil, err } @@ -478,11 +495,15 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve return nil, errors.Errorf("invalid mode %q", mode) } - return &ProvenanceCreator{ + pc := &ProvenanceCreator{ pr: pr, j: j, addLayers: addLayers, - }, nil + } + if withUsage { + pc.sampler = usage + } + return pc, nil } func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) { @@ -495,6 +516,14 @@ func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) } } + if p.sampler != nil { + sysSamples, err := p.sampler.Close(true) + if err != nil { + return nil, err + } + p.pr.Metadata.BuildKitMetadata.SysUsage = sysSamples + } + return p.pr, nil } @@ -569,3 +598,161 @@ func resolveRemotes(ctx context.Context, res solver.Result) ([]*solver.Remote, e } return remotes, nil } + +func AddBuildConfig(ctx context.Context, p *provenance.ProvenancePredicate, c *provenance.Capture, rp solver.ResultProxy, withUsage bool) (map[digest.Digest]int, error) { + def := rp.Definition() + steps, indexes, err := toBuildSteps(def, c, withUsage) + if err != nil { + return nil, err + } + + bc := &provenance.BuildConfig{ + Definition: steps, + DigestMapping: digestMap(indexes), + } + + p.BuildConfig = bc + + if def.Source != nil { + sis := make([]provenance.SourceInfo, len(def.Source.Infos)) + for i, si := 
range def.Source.Infos { + steps, indexes, err := toBuildSteps(si.Definition, c, withUsage) + if err != nil { + return nil, err + } + s := provenance.SourceInfo{ + Filename: si.Filename, + Data: si.Data, + Language: si.Language, + Definition: steps, + DigestMapping: digestMap(indexes), + } + sis[i] = s + } + + if len(def.Source.Infos) != 0 { + locs := map[string]*pb.Locations{} + for k, l := range def.Source.Locations { + idx, ok := indexes[digest.Digest(k)] + if !ok { + continue + } + locs[fmt.Sprintf("step%d", idx)] = l + } + + if p.Metadata == nil { + p.Metadata = &provenance.ProvenanceMetadata{} + } + p.Metadata.BuildKitMetadata.Source = &provenance.Source{ + Infos: sis, + Locations: locs, + } + } + } + + return indexes, nil +} + +func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { + m := map[digest.Digest]string{} + for k, v := range idx { + m[k] = fmt.Sprintf("step%d", v) + } + return m +} + +func toBuildSteps(def *pb.Definition, c *provenance.Capture, withUsage bool) ([]provenance.BuildStep, map[digest.Digest]int, error) { + if def == nil || len(def.Def) == 0 { + return nil, nil, nil + } + + ops := make(map[digest.Digest]*pb.Op) + defs := make(map[digest.Digest][]byte) + + var dgst digest.Digest + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + for k := range src.Attrs { + if k == "local.session" || k == "local.unique" { + delete(src.Attrs, k) + } + } + } + dgst = digest.FromBytes(dt) + ops[dgst] = &op + defs[dgst] = dt + } + + if dgst == "" { + return nil, nil, nil + } + + // depth first backwards + dgsts := make([]digest.Digest, 0, len(def.Def)) + op := ops[dgst] + + if op.Op != nil { + return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) + } + + if len(op.Inputs) != 1 { + return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) + } + + visited := map[digest.Digest]struct{}{} + dgsts, err := walkDigests(dgsts, ops, dgst, visited) + if err != nil { + return nil, nil, err + } + indexes := map[digest.Digest]int{} + for i, dgst := range dgsts { + indexes[dgst] = i + } + + out := make([]provenance.BuildStep, 0, len(dgsts)) + for i, dgst := range dgsts { + op := *ops[dgst] + inputs := make([]string, len(op.Inputs)) + for i, inp := range op.Inputs { + inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) + } + op.Inputs = nil + s := provenance.BuildStep{ + ID: fmt.Sprintf("step%d", i), + Inputs: inputs, + Op: op, + } + if withUsage { + s.ResourceUsage = c.Samples[dgst] + } + out = append(out, s) + } + return out, indexes, nil +} + +func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { + if _, ok := visited[dgst]; ok { + return dgsts, nil + } + op, ok := ops[dgst] + if !ok { + return nil, errors.Errorf("failed to find input %v", dgst) + } + if op == nil { + return nil, errors.Errorf("invalid nil input %v", dgst) + } + visited[dgst] = struct{}{} + for _, inp := range op.Inputs { + var err error + dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) + if err != nil { + return nil, err + } + } + dgsts = append(dgsts, dgst) + return dgsts, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go index 4d9bf85ec1..8f903585be 100644 --- 
a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go @@ -1,13 +1,9 @@ package provenance import ( - "context" - "fmt" - - "github.com/moby/buildkit/solver" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type BuildConfig struct { @@ -16,9 +12,10 @@ type BuildConfig struct { } type BuildStep struct { - ID string `json:"id,omitempty"` - Op interface{} `json:"op,omitempty"` - Inputs []string `json:"inputs,omitempty"` + ID string `json:"id,omitempty"` + Op interface{} `json:"op,omitempty"` + Inputs []string `json:"inputs,omitempty"` + ResourceUsage *resourcestypes.Samples `json:"resourceUsage,omitempty"` } type Source struct { @@ -28,160 +25,8 @@ type Source struct { type SourceInfo struct { Filename string `json:"filename,omitempty"` + Language string `json:"language,omitempty"` Data []byte `json:"data,omitempty"` Definition []BuildStep `json:"llbDefinition,omitempty"` DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` } - -func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { - m := map[digest.Digest]string{} - for k, v := range idx { - m[k] = fmt.Sprintf("step%d", v) - } - return m -} - -func AddBuildConfig(ctx context.Context, p *ProvenancePredicate, rp solver.ResultProxy) (map[digest.Digest]int, error) { - def := rp.Definition() - steps, indexes, err := toBuildSteps(def) - if err != nil { - return nil, err - } - - bc := &BuildConfig{ - Definition: steps, - DigestMapping: digestMap(indexes), - } - - p.BuildConfig = bc - - if def.Source != nil { - sis := make([]SourceInfo, len(def.Source.Infos)) - for i, si := range def.Source.Infos { - steps, indexes, err := toBuildSteps(si.Definition) - if err != nil { - return nil, err - } - s := SourceInfo{ - Filename: si.Filename, - Data: si.Data, - Definition: steps, - DigestMapping: digestMap(indexes), - } - sis[i] = s - } - - if len(def.Source.Infos) != 0 { - locs := map[string]*pb.Locations{} - for k, l := range def.Source.Locations { - idx, ok := indexes[digest.Digest(k)] - if !ok { - continue - } - locs[fmt.Sprintf("step%d", idx)] = l - } - - if p.Metadata == nil { - p.Metadata = &ProvenanceMetadata{} - } - p.Metadata.BuildKitMetadata.Source = &Source{ - Infos: sis, - Locations: locs, - } - } - } - - return indexes, nil -} - -func toBuildSteps(def *pb.Definition) ([]BuildStep, map[digest.Digest]int, error) { - if def == nil || len(def.Def) == 0 { - return nil, nil, nil - } - - ops := make(map[digest.Digest]*pb.Op) - defs := make(map[digest.Digest][]byte) - - var dgst digest.Digest - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, nil, errors.Wrap(err, "failed to parse llb proto op") - } - if src := op.GetSource(); src != nil { - for k := range src.Attrs { - if k == "local.session" || k == "local.unique" { - delete(src.Attrs, k) - } - } - } - dgst = digest.FromBytes(dt) - ops[dgst] = &op - defs[dgst] = dt - } - - if dgst == "" { - return nil, nil, nil - } - - // depth first backwards - dgsts := make([]digest.Digest, 0, len(def.Def)) - op := ops[dgst] - - if op.Op != nil { - return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) - } - - if len(op.Inputs) != 1 { - return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) - } - - visited := map[digest.Digest]struct{}{} - dgsts, err := 
walkDigests(dgsts, ops, dgst, visited) - if err != nil { - return nil, nil, err - } - indexes := map[digest.Digest]int{} - for i, dgst := range dgsts { - indexes[dgst] = i - } - - out := make([]BuildStep, 0, len(dgsts)) - for i, dgst := range dgsts { - op := *ops[dgst] - inputs := make([]string, len(op.Inputs)) - for i, inp := range op.Inputs { - inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) - } - op.Inputs = nil - out = append(out, BuildStep{ - ID: fmt.Sprintf("step%d", i), - Inputs: inputs, - Op: op, - }) - } - return out, indexes, nil -} - -func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { - if _, ok := visited[dgst]; ok { - return dgsts, nil - } - op, ok := ops[dgst] - if !ok { - return nil, errors.Errorf("failed to find input %v", dgst) - } - if op == nil { - return nil, errors.Errorf("invalid nil input %v", dgst) - } - visited[dgst] = struct{}{} - for _, inp := range op.Inputs { - var err error - dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) - if err != nil { - return nil, err - } - } - dgsts = append(dgsts, dgst) - return dgsts, nil -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go index 6252ebc3cf..f4d43fba4c 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go @@ -4,6 +4,7 @@ import ( "sort" distreference "github.com/docker/distribution/reference" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/urlutil" digest "github.com/opencontainers/go-digest" @@ -16,6 +17,7 @@ type ImageSource struct { Ref string Platform *ocispecs.Platform Digest digest.Digest + Local bool } type GitSource struct { @@ -43,11 +45,10 @@ type SSH struct { } type Sources struct { - Images []ImageSource - LocalImages []ImageSource - Git []GitSource - HTTP []HTTPSource - Local []LocalSource + Images []ImageSource + Git []GitSource + HTTP []HTTPSource + Local []LocalSource } type Capture struct { @@ -58,6 +59,7 @@ type Capture struct { SSH []SSH NetworkAccess bool IncompleteMaterials bool + Samples map[digest.Digest]*resourcestypes.Samples } func (c *Capture) Merge(c2 *Capture) error { @@ -67,9 +69,6 @@ func (c *Capture) Merge(c2 *Capture) error { for _, i := range c2.Sources.Images { c.AddImage(i) } - for _, i := range c2.Sources.LocalImages { - c.AddLocalImage(i) - } for _, l := range c2.Sources.Local { c.AddLocal(l) } @@ -98,9 +97,6 @@ func (c *Capture) Sort() { sort.Slice(c.Sources.Images, func(i, j int) bool { return c.Sources.Images[i].Ref < c.Sources.Images[j].Ref }) - sort.Slice(c.Sources.LocalImages, func(i, j int) bool { - return c.Sources.LocalImages[i].Ref < c.Sources.LocalImages[j].Ref - }) sort.Slice(c.Sources.Local, func(i, j int) bool { return c.Sources.Local[i].Name < c.Sources.Local[j].Name }) @@ -151,7 +147,7 @@ func (c *Capture) OptimizeImageSources() error { func (c *Capture) AddImage(i ImageSource) { for _, v := range c.Sources.Images { - if v.Ref == i.Ref { + if v.Ref == i.Ref && v.Local == i.Local { if v.Platform == i.Platform { return } @@ -165,22 +161,6 @@ func (c *Capture) AddImage(i ImageSource) { c.Sources.Images = append(c.Sources.Images, i) } -func (c *Capture) AddLocalImage(i ImageSource) { - for _, v := range c.Sources.LocalImages { - if 
v.Ref == i.Ref { - if v.Platform == i.Platform { - return - } - if v.Platform != nil && i.Platform != nil { - if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { - return - } - } - } - } - c.Sources.LocalImages = append(c.Sources.LocalImages, i) -} - func (c *Capture) AddLocal(l LocalSource) { for _, v := range c.Sources.Local { if v.Name == l.Name { @@ -237,6 +217,13 @@ func (c *Capture) AddSSH(s SSH) { c.SSH = append(c.SSH, s) } +func (c *Capture) AddSamples(dgst digest.Digest, samples *resourcestypes.Samples) { + if c.Samples == nil { + c.Samples = map[digest.Digest]*resourcestypes.Samples{} + } + c.Samples[dgst] = samples +} + func parseRefName(s string) (distreference.Named, string, error) { ref, err := distreference.ParseNormalizedNamed(s) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go index f2f7c4e2ad..f07ce879d7 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/platforms" slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + resourcetypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/util/purl" "github.com/moby/buildkit/util/urlutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -50,17 +51,24 @@ type ProvenanceMetadata struct { } type BuildKitMetadata struct { - VCS map[string]string `json:"vcs,omitempty"` - Source *Source `json:"source,omitempty"` - Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` + SysUsage []*resourcetypes.SysSample `json:"sysUsage,omitempty"` } func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { - count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) + len(srcs.LocalImages) + count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) out := make([]slsa.ProvenanceMaterial, 0, count) for _, s := range srcs.Images { - uri, err := purl.RefToPURL(s.Ref, s.Platform) + var uri string + var err error + if s.Local { + uri, err = purl.RefToPURL(packageurl.TypeOCI, s.Ref, s.Platform) + } else { + uri, err = purl.RefToPURL(packageurl.TypeDocker, s.Ref, s.Platform) + } if err != nil { return nil, err } @@ -93,26 +101,6 @@ func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { }) } - for _, s := range srcs.LocalImages { - q := []packageurl.Qualifier{} - if s.Platform != nil { - q = append(q, packageurl.Qualifier{ - Key: "platform", - Value: platforms.Format(*s.Platform), - }) - } - packageurl.NewPackageURL(packageurl.TypeOCI, "", s.Ref, "", q, "") - - material := slsa.ProvenanceMaterial{ - URI: s.Ref, - } - if s.Digest != "" { - material.Digest = slsa.DigestSet{ - s.Digest.Algorithm().String(): s.Digest.Hex(), - } - } - out = append(out, material) - } return out, nil } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go index d65a9e6490..9295e08c63 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ 
b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -2,7 +2,6 @@ package llbsolver import ( "context" - "encoding/base64" "encoding/json" "fmt" "os" @@ -10,6 +9,7 @@ import ( "sync" "time" + intoto "github.com/in-toto/in-toto-golang/in_toto" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/cache" @@ -17,6 +17,8 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" controlgateway "github.com/moby/buildkit/control/gateway" + "github.com/moby/buildkit/executor/resources" + resourcetypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" @@ -27,8 +29,7 @@ import ( "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/result" spb "github.com/moby/buildkit/sourcepolicy/pb" - "github.com/moby/buildkit/util/attestation" - "github.com/moby/buildkit/util/buildinfo" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/grpcerrors" @@ -37,7 +38,6 @@ import ( "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" @@ -76,6 +76,7 @@ type Opt struct { SessionManager *session.Manager WorkerController *worker.Controller HistoryQueue *HistoryQueue + ResourceMonitor *resources.Monitor } type Solver struct { @@ -89,11 +90,12 @@ type Solver struct { sm *session.Manager entitlements []string history *HistoryQueue + sysSampler *resources.Sampler[*resourcetypes.SysSample] } // Processor defines a processing function to be applied after solving, but // before exporting -type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error) +type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job, usage *resources.SysSampler) (*Result, error) func New(opt Opt) (*Solver, error) { s := &Solver{ @@ -108,6 +110,12 @@ func New(opt Opt) (*Solver, error) { history: opt.HistoryQueue, } + sampler, err := resources.NewSysSampler() + if err != nil { + return nil, err + } + s.sysSampler = sampler + s.solver = solver.NewSolver(solver.SolverOpt{ ResolveOpFunc: s.resolver(), DefaultCache: opt.CacheManager, @@ -141,7 +149,7 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { return s.bridge(b) } -func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job) (func(*Result, exporter.DescriptorReference, error) error, error) { +func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job, usage *resources.SysSampler) (func(*Result, exporter.DescriptorReference, error) error, error) { var stopTrace func() []tracetest.SpanStub if s := trace.SpanFromContext(ctx); s.SpanContext().IsValid() { @@ -196,11 +204,12 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend var releasers []func() attrs := map[string]string{ - "mode": "max", + "mode": "max", + "capture-usage": "true", } makeProvenance := func(res solver.ResultProxy, cap *provenance.Capture) (*controlapi.Descriptor, func(), error) { - 
prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j) + prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j, usage) if err != nil { return nil, nil, err } @@ -212,7 +221,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend if err != nil { return nil, nil, err } - w, err := s.history.OpenBlobWriter(ctx, attestation.MediaTypeDockerSchema2AttestationType) + w, err := s.history.OpenBlobWriter(ctx, intoto.PayloadType) if err != nil { return nil, nil, err } @@ -333,7 +342,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend }() if err != nil { - st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(err)) + st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(ctx, err)) if !ok { st = status.New(codes.Unknown, err.Error()) } @@ -349,7 +358,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend } if stopTrace == nil { - logrus.Warn("no trace recorder found, skipping") + bklog.G(ctx).Warn("no trace recorder found, skipping") return err } go func() { @@ -391,7 +400,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend } return nil }(); err != nil { - logrus.Errorf("failed to save trace for %s: %+v", id, err) + bklog.G(ctx).Errorf("failed to save trace for %s: %+v", id, err) } }() @@ -407,6 +416,12 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro defer j.Discard() + var usage *resources.Sub[*resourcetypes.SysSample] + if s.sysSampler != nil { + usage = s.sysSampler.Record() + defer usage.Close(false) + } + var res *frontend.Result var resProv *Result var descref exporter.DescriptorReference @@ -453,7 +468,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } if !internal { - rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j) + rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j, usage) if err1 != nil { defer j.CloseProgress() return nil, err1 @@ -510,7 +525,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } for _, post := range post { - res2, err := post(ctx, resProv, s, j) + res2, err := post(ctx, resProv, s, j, usage) if err != nil { return nil, err } @@ -568,9 +583,6 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro if strings.HasPrefix(k, "frontend.") { exporterResponse[k] = string(v) } - if strings.HasPrefix(k, exptypes.ExporterBuildInfo) { - exporterResponse[k] = base64.StdEncoding.EncodeToString(v) - } } for k, v := range cacheExporterResponse { if strings.HasPrefix(k, "cache.") { @@ -700,9 +712,6 @@ func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, if res.Metadata == nil { res.Metadata = map[string][]byte{} } - if err := buildinfo.AddMetadata(res.Metadata, exptypes.ExporterBuildInfo, cp); err != nil { - return nil, err - } } if len(res.Refs) != 0 { @@ -717,9 +726,6 @@ func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, if res.Metadata == nil { res.Metadata = map[string][]byte{} } - if err := buildinfo.AddMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), cp); err != nil { - return nil, err - } } if len(res.Attestations) != 0 { @@ -746,8 +752,9 @@ func getRefProvenance(ref solver.ResultProxy, br *provenanceBridge) (*provenance } p := ref.Provenance() if p == nil { - return nil, errors.Errorf("missing provenance for %s", ref.ID()) + return nil, nil } + pr, ok := p.(*provenance.Capture) if !ok { return nil, errors.Errorf("invalid 
provenance type %T", p) @@ -892,6 +899,7 @@ func defaultResolver(wc *worker.Controller) ResolveWorkerFunc { return wc.GetDefault() } } + func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { return func(f func(worker.Worker) error) error { all, err := wc.List() @@ -917,27 +925,26 @@ func inBuilderContext(ctx context.Context, b solver.Builder, name, id string, f } return b.InContext(ctx, func(ctx context.Context, g session.Group) error { pw, _, ctx := progress.NewFromContext(ctx, progress.WithMetadata("vertex", v.Digest)) - notifyCompleted := notifyStarted(ctx, &v, false) + notifyCompleted := notifyStarted(ctx, &v) defer pw.Close() err := f(ctx, g) - notifyCompleted(err, false) + notifyCompleted(err) return err }) } -func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) func(err error, cached bool) { +func notifyStarted(ctx context.Context, v *client.Vertex) func(err error) { pw, _, _ := progress.NewFromContext(ctx) start := time.Now() v.Started = &start v.Completed = nil - v.Cached = cached id := identity.NewID() pw.Write(id, *v) - return func(err error, cached bool) { + return func(err error) { defer pw.Close() stop := time.Now() v.Completed = &stop - v.Cached = cached + v.Cached = false if err != nil { v.Error = err.Error() } @@ -977,27 +984,21 @@ func loadEntitlements(b solver.Builder) (entitlements.Set, error) { } func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) { - set := make(map[spb.Rule]struct{}, 0) + var srcPol spb.Policy err := b.EachValue(context.TODO(), keySourcePolicy, func(v interface{}) error { x, ok := v.(spb.Policy) if !ok { return errors.Errorf("invalid source policy %T", v) } for _, f := range x.Rules { - set[*f] = struct{}{} + r := *f + srcPol.Rules = append(srcPol.Rules, &r) } + srcPol.Version = x.Version return nil }) if err != nil { return nil, err } - var srcPol *spb.Policy - if len(set) > 0 { - srcPol = &spb.Policy{} - for k := range set { - k := k - srcPol.Rules = append(srcPol.Rules, &k) - } - } - return srcPol, nil + return &srcPol, nil } diff --git a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go index fc50d82ad4..7fd1fa6268 100644 --- a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -303,7 +303,7 @@ func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *c return nil, nil } -func (s *inMemoryResultStore) Exists(id string) bool { +func (s *inMemoryResultStore) Exists(ctx context.Context, id string) bool { _, ok := s.m.Load(id) return ok } diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index 02380a4bab..5e1963ff8f 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -49,7 +49,7 @@ const ( CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" CapExecMetaRemoveMountStubsRecursive apicaps.CapID = "exec.meta.removemountstubs.recursive" CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" + CapExecMountBindReadWriteNoOutput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" CapExecMountCache apicaps.CapID = "exec.mount.cache" CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" CapExecMountSelector apicaps.CapID = "exec.mount.selector" @@ -288,7 +288,7 @@ func init() { }) Caps.Init(apicaps.Cap{ - 
ID: CapExecMountBindReadWriteNoOuput, + ID: CapExecMountBindReadWriteNoOutput, Enabled: true, Status: apicaps.CapStatusExperimental, }) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index e8afea0233..aadff21b64 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -1443,6 +1443,7 @@ type SourceInfo struct { Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Definition *Definition `protobuf:"bytes,3,opt,name=definition,proto3" json:"definition,omitempty"` + Language string `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"` } func (m *SourceInfo) Reset() { *m = SourceInfo{} } @@ -1495,6 +1496,13 @@ func (m *SourceInfo) GetDefinition() *Definition { return nil } +func (m *SourceInfo) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + // Location defines list of areas in to source file type Location struct { SourceIndex int32 `protobuf:"varint,1,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` @@ -2842,168 +2850,169 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2564 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcf, 0x6f, 0x5b, 0xc7, - 0xf1, 0x17, 0x7f, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xe8, 0xeb, 0xaf, 0xac, 0xbc, 0xa4, - 0x81, 0x2c, 0xdb, 0x32, 0xaa, 0x00, 0x71, 0x60, 0x04, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xc7, 0xa2, - 0xb0, 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xe9, 0x41, 0x8f, 0x6f, 0x1f, 0xf6, 0x2d, - 0x2d, 0xb1, 0x87, 0x1e, 0xfa, 0x17, 0x04, 0x28, 0x50, 0xf4, 0x52, 0xf4, 0x9f, 0xe8, 0xb1, 0xbd, - 0x07, 0xc8, 0x25, 0x87, 0x1e, 0x82, 0x1e, 0xd2, 0xc2, 0xb9, 0xf4, 0x8f, 0x68, 0x81, 0x62, 0x66, - 0xf7, 0xfd, 0x20, 0x25, 0xc3, 0x71, 0x5b, 0xf4, 0xc4, 0x79, 0x33, 0x9f, 0x9d, 0x9d, 0x9d, 0x9d, - 0xd9, 0x99, 0x5d, 0x42, 0x43, 0x46, 0xf1, 0x56, 0xa4, 0xa4, 0x96, 0xac, 0x18, 0x1d, 0xad, 0xde, - 0x39, 0xf6, 0xf5, 0xc9, 0xf4, 0x68, 0xcb, 0x93, 0x93, 0xbb, 0xc7, 0xf2, 0x58, 0xde, 0x25, 0xd1, - 0xd1, 0x74, 0x4c, 0x5f, 0xf4, 0x41, 0x94, 0x19, 0xe2, 0xfc, 0xbd, 0x08, 0xc5, 0x41, 0xc4, 0xde, - 0x85, 0xaa, 0x1f, 0x46, 0x53, 0x1d, 0x77, 0x0a, 0xeb, 0xa5, 0x8d, 0xe6, 0x76, 0x63, 0x2b, 0x3a, - 0xda, 0xea, 0x23, 0x87, 0x5b, 0x01, 0x5b, 0x87, 0xb2, 0x38, 0x17, 0x5e, 0xa7, 0xb8, 0x5e, 0xd8, - 0x68, 0x6e, 0x03, 0x02, 0x7a, 0xe7, 0xc2, 0x1b, 0x44, 0xfb, 0x4b, 0x9c, 0x24, 0xec, 0x03, 0xa8, - 0xc6, 0x72, 0xaa, 0x3c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x4a, 0x51, - 0xd3, 0xd8, 0x0f, 0x44, 0xa7, 0x9c, 0x69, 0x7a, 0xe0, 0x07, 0x06, 0x43, 0x12, 0xf6, 0x1e, 0x54, - 0x8e, 0xa6, 0x7e, 0x30, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x2e, 0x32, 0x08, 0x63, 0x64, 0x08, - 0x9a, 0x08, 0x75, 0x2c, 0x3a, 0xd5, 0x0c, 0xf4, 0x08, 0x19, 0x06, 0x44, 0x32, 0x9c, 0x6b, 0xe4, - 0x8f, 0xc7, 0x9d, 0x5a, 0x36, 0x57, 0xd7, 0x1f, 0x8f, 0xcd, 0x5c, 0x28, 0x61, 0x1b, 0x50, 0x8f, - 0x02, 0x57, 0x8f, 0xa5, 0x9a, 0x74, 0x20, 0xb3, 0xfb, 0xd0, 0xf2, 0x78, 0x2a, 0x65, 0xf7, 0xa0, - 0xe9, 0xc9, 0x30, 0xd6, 0xca, 0xf5, 0x43, 0x1d, 0x77, 0x9a, 0x04, 0x7e, 0x13, 0xc1, 0x5f, 0x48, - 0x75, 0x2a, 0xd4, 0x5e, 0x26, 0xe4, 0x79, 0xe4, 0x6e, 0x19, 0x8a, 0x32, 0x72, 0x7e, 0x53, 0x80, - 0x7a, 0xa2, 0x95, 0x39, 0xb0, 0xbc, 0xa3, 0xbc, 0x13, 
0x5f, 0x0b, 0x4f, 0x4f, 0x95, 0xe8, 0x14, - 0xd6, 0x0b, 0x1b, 0x0d, 0x3e, 0xc7, 0x63, 0x2d, 0x28, 0x0e, 0x86, 0xe4, 0xef, 0x06, 0x2f, 0x0e, - 0x86, 0xac, 0x03, 0xb5, 0xa7, 0xae, 0xf2, 0xdd, 0x50, 0x93, 0x83, 0x1b, 0x3c, 0xf9, 0x64, 0xd7, - 0xa1, 0x31, 0x18, 0x3e, 0x15, 0x2a, 0xf6, 0x65, 0x48, 0x6e, 0x6d, 0xf0, 0x8c, 0xc1, 0xd6, 0x00, - 0x06, 0xc3, 0x07, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x5e, 0xda, 0x68, 0xf0, 0x1c, 0xc7, 0xf9, - 0x25, 0x54, 0x68, 0xab, 0xd9, 0x67, 0x50, 0x1d, 0xf9, 0xc7, 0x22, 0xd6, 0xc6, 0x9c, 0xdd, 0xed, - 0xaf, 0xbe, 0xbb, 0xb1, 0xf4, 0x97, 0xef, 0x6e, 0x6c, 0xe6, 0x62, 0x4a, 0x46, 0x22, 0xf4, 0x64, - 0xa8, 0x5d, 0x3f, 0x14, 0x2a, 0xbe, 0x7b, 0x2c, 0xef, 0x98, 0x21, 0x5b, 0x5d, 0xfa, 0xe1, 0x56, - 0x03, 0xbb, 0x09, 0x15, 0x3f, 0x1c, 0x89, 0x73, 0xb2, 0xbf, 0xb4, 0x7b, 0xd5, 0xaa, 0x6a, 0x0e, - 0xa6, 0x3a, 0x9a, 0xea, 0x3e, 0x8a, 0xb8, 0x41, 0x38, 0x5f, 0x17, 0xa0, 0x6a, 0x42, 0x89, 0x5d, - 0x87, 0xf2, 0x44, 0x68, 0x97, 0xe6, 0x6f, 0x6e, 0xd7, 0xcd, 0x96, 0x6a, 0x97, 0x13, 0x17, 0xa3, - 0x74, 0x22, 0xa7, 0xe8, 0xfb, 0x62, 0x16, 0xa5, 0x8f, 0x90, 0xc3, 0xad, 0x80, 0xfd, 0x08, 0x6a, - 0xa1, 0xd0, 0x67, 0x52, 0x9d, 0x92, 0x8f, 0x5a, 0x26, 0x2c, 0x0e, 0x84, 0x7e, 0x24, 0x47, 0x82, - 0x27, 0x32, 0x76, 0x1b, 0xea, 0xb1, 0xf0, 0xa6, 0xca, 0xd7, 0x33, 0xf2, 0x57, 0x6b, 0xbb, 0x4d, - 0xc1, 0x6a, 0x79, 0x04, 0x4e, 0x11, 0xec, 0x16, 0x34, 0x62, 0xe1, 0x29, 0xa1, 0x45, 0xf8, 0x9c, - 0xfc, 0xd7, 0xdc, 0x5e, 0xb1, 0x70, 0x25, 0x74, 0x2f, 0x7c, 0xce, 0x33, 0xb9, 0xf3, 0x75, 0x11, - 0xca, 0x68, 0x33, 0x63, 0x50, 0x76, 0xd5, 0xb1, 0xc9, 0xa8, 0x06, 0x27, 0x9a, 0xb5, 0xa1, 0x84, - 0x3a, 0x8a, 0xc4, 0x42, 0x12, 0x39, 0xde, 0xd9, 0xc8, 0x6e, 0x28, 0x92, 0x38, 0x6e, 0x1a, 0x0b, - 0x65, 0xf7, 0x91, 0x68, 0x76, 0x13, 0x1a, 0x91, 0x92, 0xe7, 0xb3, 0x67, 0xc6, 0x82, 0x2c, 0x4a, - 0x91, 0x89, 0x06, 0xd4, 0x23, 0x4b, 0xb1, 0x4d, 0x00, 0x71, 0xae, 0x95, 0xbb, 0x2f, 0x63, 0x1d, - 0x77, 0xaa, 0x64, 0x2d, 0xc5, 0x3d, 0x32, 0xfa, 0x87, 0x3c, 0x27, 0x65, 0xab, 0x50, 0x3f, 0x91, - 0xb1, 0x0e, 0xdd, 0x89, 0xa0, 0x0c, 0x69, 0xf0, 0xf4, 0x9b, 0x39, 0x50, 0x9d, 0x06, 0xfe, 0xc4, - 0xd7, 0x9d, 0x46, 0xa6, 0xe3, 0x09, 0x71, 0xb8, 0x95, 0x60, 0x14, 0x7b, 0xc7, 0x4a, 0x4e, 0xa3, - 0x43, 0x57, 0x89, 0x50, 0x53, 0xfe, 0x34, 0xf8, 0x1c, 0x8f, 0x7d, 0x02, 0xef, 0x28, 0x31, 0x91, - 0xcf, 0x05, 0x6d, 0xd4, 0x50, 0x4f, 0x8f, 0x62, 0x8e, 0x8e, 0x8d, 0xfd, 0xe7, 0x82, 0x72, 0xa8, - 0xce, 0x5f, 0x0e, 0x70, 0x6e, 0x43, 0xd5, 0xd8, 0x8d, 0x6e, 0x41, 0xca, 0x66, 0x0a, 0xd1, 0x98, - 0x21, 0xfd, 0xc3, 0x24, 0x43, 0xfa, 0x87, 0x4e, 0x17, 0xaa, 0xc6, 0x42, 0x44, 0x1f, 0xe0, 0xaa, - 0x2c, 0x1a, 0x69, 0xe4, 0x0d, 0xe5, 0x58, 0x9b, 0x88, 0xe4, 0x44, 0x93, 0x56, 0x57, 0x19, 0xff, - 0x97, 0x38, 0xd1, 0xce, 0x43, 0x68, 0xa4, 0x3b, 0x4b, 0x53, 0x74, 0xad, 0x9a, 0x62, 0xbf, 0x8b, - 0x03, 0xc8, 0x5d, 0x66, 0x52, 0xa2, 0xd1, 0x8d, 0x32, 0xd2, 0xbe, 0x0c, 0xdd, 0x80, 0x14, 0xd5, - 0x79, 0xfa, 0xed, 0xfc, 0xb6, 0x04, 0x15, 0x5a, 0x18, 0xdb, 0xc0, 0x8c, 0x88, 0xa6, 0x66, 0x05, - 0xa5, 0x5d, 0x66, 0x33, 0x02, 0x28, 0xf7, 0xd2, 0x84, 0xc0, 0x3c, 0x5c, 0xc5, 0xe8, 0x0c, 0x84, - 0xa7, 0xa5, 0xb2, 0xf3, 0xa4, 0xdf, 0x38, 0xff, 0x08, 0x33, 0xd4, 0x04, 0x0c, 0xd1, 0xec, 0x16, - 0x54, 0x25, 0xa5, 0x15, 0xc5, 0xcc, 0x4b, 0x92, 0xcd, 0x42, 0x50, 0xb9, 0x12, 0xee, 0x48, 0x86, - 0xc1, 0x8c, 0x22, 0xa9, 0xce, 0xd3, 0x6f, 0x0c, 0x74, 0xca, 0xa3, 0xc7, 0xb3, 0xc8, 0x1c, 0xab, - 0x2d, 0x13, 0xe8, 0x8f, 0x12, 0x26, 0xcf, 0xe4, 0x78, 0x70, 0x3e, 0x9e, 0x44, 0xe3, 0x78, 0x10, - 0xe9, 0xce, 0xd5, 0x2c, 0x24, 0x13, 0x1e, 0x4f, 0xa5, 0x88, 0xf4, 0x5c, 0xef, 
0x44, 0x20, 0xf2, - 0x5a, 0x86, 0xdc, 0xb3, 0x3c, 0x9e, 0x4a, 0xb3, 0x4c, 0x43, 0xe8, 0x9b, 0x04, 0xcd, 0x65, 0x1a, - 0x62, 0x33, 0x39, 0x46, 0xe8, 0x70, 0xb8, 0x8f, 0xc8, 0xb7, 0xb2, 0xd3, 0xdd, 0x70, 0xb8, 0x95, - 0x98, 0xd5, 0xc6, 0xd3, 0x40, 0xf7, 0xbb, 0x9d, 0xb7, 0x8d, 0x2b, 0x93, 0x6f, 0x67, 0x2d, 0x5b, - 0x00, 0xba, 0x35, 0xf6, 0x7f, 0x61, 0xe2, 0xa5, 0xc4, 0x89, 0x76, 0xfa, 0x50, 0x4f, 0x4c, 0xbc, - 0x10, 0x06, 0x77, 0xa0, 0x16, 0x9f, 0xb8, 0xca, 0x0f, 0x8f, 0x69, 0x87, 0x5a, 0xdb, 0x57, 0xd3, - 0x15, 0x0d, 0x0d, 0x1f, 0xad, 0x48, 0x30, 0x8e, 0x4c, 0x42, 0xea, 0x32, 0x5d, 0x6d, 0x28, 0x4d, - 0xfd, 0x11, 0xe9, 0x59, 0xe1, 0x48, 0x22, 0xe7, 0xd8, 0x37, 0x41, 0xb9, 0xc2, 0x91, 0x44, 0xfb, - 0x26, 0x72, 0x64, 0x6a, 0xe6, 0x0a, 0x27, 0x7a, 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0x17, 0x24, 0xbe, - 0xf9, 0x9f, 0xcc, 0xf6, 0xeb, 0x02, 0xd4, 0x93, 0x42, 0x8f, 0xe5, 0xc6, 0x1f, 0x89, 0x50, 0xfb, - 0x63, 0x5f, 0x28, 0x3b, 0x71, 0x8e, 0xc3, 0xee, 0x40, 0xc5, 0xd5, 0x5a, 0x25, 0x87, 0xf8, 0xdb, - 0xf9, 0x2e, 0x61, 0x6b, 0x07, 0x25, 0xbd, 0x50, 0xab, 0x19, 0x37, 0xa8, 0xd5, 0x8f, 0x01, 0x32, - 0x26, 0xda, 0x7a, 0x2a, 0x66, 0x56, 0x2b, 0x92, 0xec, 0x1a, 0x54, 0x9e, 0xbb, 0xc1, 0x34, 0xc9, - 0x48, 0xf3, 0x71, 0xbf, 0xf8, 0x71, 0xc1, 0xf9, 0x53, 0x11, 0x6a, 0xb6, 0x6b, 0x60, 0xb7, 0xa1, - 0x46, 0x5d, 0x83, 0xb5, 0xe8, 0xf2, 0xf4, 0x4b, 0x20, 0xec, 0x6e, 0xda, 0x0e, 0xe5, 0x6c, 0xb4, - 0xaa, 0x4c, 0x5b, 0x64, 0x6d, 0xcc, 0x9a, 0xa3, 0xd2, 0x48, 0x8c, 0x6d, 0xdf, 0xd3, 0xa2, 0x2e, - 0x43, 0x8c, 0xfd, 0xd0, 0x47, 0xff, 0x70, 0x14, 0xb1, 0xdb, 0xc9, 0xaa, 0xcb, 0xa4, 0xf1, 0xad, - 0xbc, 0xc6, 0x8b, 0x8b, 0xee, 0x43, 0x33, 0x37, 0xcd, 0x25, 0xab, 0x7e, 0x3f, 0xbf, 0x6a, 0x3b, - 0x25, 0xa9, 0x33, 0x4d, 0x5b, 0xe6, 0x85, 0xff, 0xc0, 0x7f, 0x1f, 0x01, 0x64, 0x2a, 0x7f, 0xf8, - 0xf1, 0xe5, 0xfc, 0xb1, 0x04, 0x30, 0x88, 0xb0, 0x06, 0x8e, 0x5c, 0xaa, 0xda, 0xcb, 0xfe, 0x71, - 0x28, 0x95, 0x78, 0x46, 0x69, 0x4e, 0xe3, 0xeb, 0xbc, 0x69, 0x78, 0x94, 0x31, 0x6c, 0x07, 0x9a, - 0x23, 0x11, 0x7b, 0xca, 0xa7, 0x80, 0xb2, 0x4e, 0xbf, 0x81, 0x6b, 0xca, 0xf4, 0x6c, 0x75, 0x33, - 0x84, 0xf1, 0x55, 0x7e, 0x0c, 0xdb, 0x86, 0x65, 0x71, 0x1e, 0x49, 0xa5, 0xed, 0x2c, 0xa6, 0xb9, - 0xbc, 0x62, 0xda, 0x54, 0xe4, 0xd3, 0x4c, 0xbc, 0x29, 0xb2, 0x0f, 0xe6, 0x42, 0xd9, 0x73, 0xa3, - 0xd8, 0x96, 0xf4, 0xce, 0xc2, 0x7c, 0x7b, 0x6e, 0x64, 0x9c, 0xb6, 0xfb, 0x21, 0xae, 0xf5, 0x57, - 0x7f, 0xbd, 0x71, 0x2b, 0xd7, 0x07, 0x4d, 0xe4, 0xd1, 0xec, 0x2e, 0xc5, 0xcb, 0xa9, 0xaf, 0xef, - 0x4e, 0xb5, 0x1f, 0xdc, 0x75, 0x23, 0x1f, 0xd5, 0xe1, 0xc0, 0x7e, 0x97, 0x93, 0x6a, 0xf6, 0x31, - 0xb4, 0x22, 0x25, 0x8f, 0x95, 0x88, 0xe3, 0x67, 0x54, 0x15, 0x6d, 0xb7, 0xfa, 0x86, 0xad, 0xde, - 0x24, 0xf9, 0x14, 0x05, 0x7c, 0x25, 0xca, 0x7f, 0xae, 0xfe, 0x04, 0xda, 0x8b, 0x2b, 0x7e, 0x9d, - 0xdd, 0x5b, 0xbd, 0x07, 0x8d, 0x74, 0x05, 0xaf, 0x1a, 0x58, 0xcf, 0x6f, 0xfb, 0x1f, 0x0a, 0x50, - 0x35, 0xf9, 0xc8, 0xee, 0x41, 0x23, 0x90, 0x9e, 0x8b, 0x06, 0x24, 0x37, 0x83, 0x77, 0xb2, 0x74, - 0xdd, 0xfa, 0x3c, 0x91, 0x99, 0xfd, 0xc8, 0xb0, 0x18, 0x9e, 0x7e, 0x38, 0x96, 0x49, 0xfe, 0xb4, - 0xb2, 0x41, 0xfd, 0x70, 0x2c, 0xb9, 0x11, 0xae, 0x3e, 0x84, 0xd6, 0xbc, 0x8a, 0x4b, 0xec, 0x7c, - 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x66, 0xdf, 0x83, 0x46, 0xca, 0x67, 0x9b, 0x17, - 0x0d, 0x5f, 0xce, 0x8f, 0xcc, 0xd9, 0xea, 0x04, 0x00, 0x99, 0x69, 0x78, 0xcc, 0xe1, 0x15, 0x24, - 0xcc, 0x9a, 0x87, 0xf4, 0x9b, 0x6a, 0xaf, 0xab, 0x5d, 0x32, 0x65, 0x99, 0x13, 0xcd, 0xb6, 0x00, - 0x46, 0x69, 0xaa, 0xbf, 0xe4, 0x00, 0xc8, 0x21, 0x9c, 0x01, 0xd4, 0x13, 0x23, 0xd8, 0x3a, 0x34, - 0x63, 
0x3b, 0x33, 0x76, 0xca, 0x38, 0x5d, 0x85, 0xe7, 0x59, 0xd8, 0xf1, 0x2a, 0x37, 0x3c, 0x16, - 0x73, 0x1d, 0x2f, 0x47, 0x0e, 0xb7, 0x02, 0xe7, 0x0b, 0xa8, 0x10, 0x03, 0x13, 0x34, 0xd6, 0xae, - 0xd2, 0xb6, 0x79, 0x36, 0xfd, 0xa1, 0x8c, 0x69, 0xda, 0xdd, 0x32, 0x86, 0x30, 0x37, 0x00, 0xf6, - 0x3e, 0x76, 0xa1, 0x23, 0xeb, 0xd1, 0xcb, 0x70, 0x28, 0x76, 0x3e, 0x81, 0x7a, 0xc2, 0xc6, 0x95, - 0x07, 0x7e, 0x28, 0xac, 0x89, 0x44, 0xe3, 0xa5, 0xc3, 0x3b, 0x71, 0x95, 0xeb, 0x69, 0x61, 0xda, - 0x94, 0x0a, 0xcf, 0x18, 0xce, 0x7b, 0xd0, 0xcc, 0xe5, 0x1d, 0x86, 0xdb, 0x53, 0xda, 0x46, 0x93, - 0xfd, 0xe6, 0xc3, 0xf9, 0x14, 0x56, 0xe6, 0x72, 0x00, 0x8b, 0x95, 0x3f, 0x4a, 0x8a, 0x95, 0x29, - 0x44, 0x17, 0xba, 0x2d, 0x06, 0xe5, 0x33, 0xe1, 0x9e, 0xda, 0x4e, 0x8b, 0x68, 0xe7, 0xf7, 0x78, - 0xb7, 0x4a, 0x3a, 0xe0, 0xff, 0x07, 0x38, 0xd1, 0x3a, 0x7a, 0x46, 0x2d, 0xb1, 0x55, 0xd6, 0x40, - 0x0e, 0x21, 0xd8, 0x0d, 0x68, 0xe2, 0x47, 0x6c, 0xe5, 0x46, 0x35, 0x8d, 0x88, 0x0d, 0xe0, 0xff, - 0xa0, 0x31, 0x4e, 0x87, 0x97, 0x6c, 0x0c, 0x24, 0xa3, 0xdf, 0x81, 0x7a, 0x28, 0xad, 0xcc, 0x74, - 0xe8, 0xb5, 0x50, 0xa6, 0xe3, 0xdc, 0x20, 0xb0, 0xb2, 0x8a, 0x19, 0xe7, 0x06, 0x01, 0x09, 0x9d, - 0x5b, 0xf0, 0xc6, 0x85, 0x5b, 0x22, 0x7b, 0x0b, 0xaa, 0x63, 0x3f, 0xd0, 0x54, 0x94, 0xf0, 0x46, - 0x60, 0xbf, 0x9c, 0x7f, 0x16, 0x00, 0xb2, 0xf8, 0xc1, 0xac, 0xc0, 0xea, 0x82, 0x98, 0x65, 0x53, - 0x4d, 0x02, 0xa8, 0x4f, 0xec, 0x39, 0x65, 0x23, 0xe3, 0xfa, 0x7c, 0xcc, 0x6d, 0x25, 0xc7, 0x98, - 0x39, 0xc1, 0xb6, 0xed, 0x09, 0xf6, 0x3a, 0x37, 0xb9, 0x74, 0x06, 0x6a, 0xb4, 0xf2, 0x17, 0x7b, - 0xc8, 0xd2, 0x99, 0x5b, 0xc9, 0xea, 0x43, 0x58, 0x99, 0x9b, 0xf2, 0x07, 0xd6, 0xac, 0xec, 0xbc, - 0xcd, 0xe7, 0xf2, 0x36, 0x54, 0xcd, 0x8b, 0x00, 0xdb, 0x80, 0x9a, 0xeb, 0x99, 0x34, 0xce, 0x1d, - 0x25, 0x28, 0xdc, 0x21, 0x36, 0x4f, 0xc4, 0xce, 0x9f, 0x8b, 0x00, 0x19, 0xff, 0x35, 0xba, 0xed, - 0xfb, 0xd0, 0x8a, 0x85, 0x27, 0xc3, 0x91, 0xab, 0x66, 0x24, 0xb5, 0x57, 0xd6, 0xcb, 0x86, 0x2c, - 0x20, 0x73, 0x9d, 0x77, 0xe9, 0xd5, 0x9d, 0xf7, 0x06, 0x94, 0x3d, 0x19, 0xcd, 0x6c, 0x69, 0x62, - 0xf3, 0x0b, 0xd9, 0x93, 0xd1, 0x6c, 0x7f, 0x89, 0x13, 0x82, 0x6d, 0x41, 0x75, 0x72, 0x4a, 0x6f, - 0x24, 0xe6, 0xae, 0x77, 0x6d, 0x1e, 0xfb, 0xe8, 0x14, 0xe9, 0xfd, 0x25, 0x6e, 0x51, 0xec, 0x16, - 0x54, 0x26, 0xa7, 0x23, 0x5f, 0xd9, 0xe2, 0x72, 0x75, 0x11, 0xde, 0xf5, 0x15, 0x3d, 0x89, 0x20, - 0x86, 0x39, 0x50, 0x54, 0x13, 0xfb, 0x20, 0xd2, 0x5e, 0xf0, 0xe6, 0x64, 0x7f, 0x89, 0x17, 0xd5, - 0x64, 0xb7, 0x0e, 0x55, 0xe3, 0x57, 0xe7, 0x1f, 0x25, 0x68, 0xcd, 0x5b, 0x89, 0x3b, 0x1b, 0x2b, - 0x2f, 0xd9, 0xd9, 0x58, 0x79, 0xe9, 0xa5, 0xa4, 0x98, 0xbb, 0x94, 0x38, 0x50, 0x91, 0x67, 0xa1, - 0x50, 0xf9, 0xc7, 0xa0, 0xbd, 0x13, 0x79, 0x16, 0x62, 0x63, 0x6c, 0x44, 0x73, 0x7d, 0x66, 0xc5, - 0xf6, 0x99, 0xef, 0xc3, 0xca, 0x58, 0x06, 0x81, 0x3c, 0x1b, 0xce, 0x26, 0x81, 0x1f, 0x9e, 0xda, - 0x66, 0x73, 0x9e, 0xc9, 0x36, 0xe0, 0xca, 0xc8, 0x57, 0x68, 0xce, 0x9e, 0x0c, 0xb5, 0x08, 0xe9, - 0xaa, 0x8b, 0xb8, 0x45, 0x36, 0xfb, 0x0c, 0xd6, 0x5d, 0xad, 0xc5, 0x24, 0xd2, 0x4f, 0xc2, 0xc8, - 0xf5, 0x4e, 0xbb, 0xd2, 0xa3, 0x2c, 0x9c, 0x44, 0xae, 0xf6, 0x8f, 0xfc, 0xc0, 0xd7, 0x33, 0x72, - 0x46, 0x9d, 0xbf, 0x12, 0xc7, 0x3e, 0x80, 0x96, 0xa7, 0x84, 0xab, 0x45, 0x57, 0xc4, 0xfa, 0xd0, - 0xd5, 0x27, 0x9d, 0x3a, 0x8d, 0x5c, 0xe0, 0xe2, 0x1a, 0x5c, 0xb4, 0xf6, 0x0b, 0x3f, 0x18, 0x79, - 0x78, 0xbd, 0x6c, 0x98, 0x35, 0xcc, 0x31, 0xd9, 0x16, 0x30, 0x62, 0xf4, 0x26, 0x91, 0x9e, 0xa5, - 0x50, 0x20, 0xe8, 0x25, 0x12, 0x3c, 0x70, 0xb5, 0x3f, 0x11, 0xb1, 0x76, 0x27, 0x11, 0xdd, 0x9c, - 0x4b, 0x3c, 0x63, 0xb0, 0x9b, 
0xd0, 0xf6, 0x43, 0x2f, 0x98, 0x8e, 0xc4, 0xb3, 0x08, 0x17, 0xa2, - 0xc2, 0xb8, 0xb3, 0x4c, 0xa7, 0xca, 0x15, 0xcb, 0x3f, 0xb4, 0x6c, 0x84, 0x8a, 0xf3, 0x05, 0xe8, - 0x8a, 0x81, 0x5a, 0x7e, 0x02, 0x75, 0xbe, 0x2c, 0x40, 0x7b, 0x31, 0xf0, 0x70, 0xdb, 0x22, 0x5c, - 0xbc, 0xbd, 0x5c, 0x23, 0x9d, 0x6e, 0x65, 0x31, 0xb7, 0x95, 0x49, 0xbd, 0x2c, 0xe5, 0xea, 0x65, - 0x1a, 0x16, 0xe5, 0x97, 0x87, 0xc5, 0xdc, 0x42, 0x2b, 0x0b, 0x0b, 0x75, 0x7e, 0x57, 0x80, 0x2b, - 0x0b, 0xc1, 0xfd, 0x83, 0x2d, 0x5a, 0x87, 0xe6, 0xc4, 0x3d, 0x15, 0xe6, 0x69, 0x22, 0xb6, 0x25, - 0x24, 0xcf, 0xfa, 0x2f, 0xd8, 0x17, 0xc2, 0x72, 0x3e, 0xa3, 0x2e, 0xb5, 0x2d, 0x09, 0x90, 0x03, - 0xa9, 0x1f, 0xc8, 0xa9, 0xad, 0xc5, 0x49, 0x80, 0x24, 0xcc, 0x8b, 0x61, 0x54, 0xba, 0x24, 0x8c, - 0x9c, 0x03, 0xa8, 0x27, 0x06, 0xb2, 0x1b, 0xf6, 0xed, 0xa8, 0x90, 0x3d, 0x89, 0x3e, 0x89, 0x85, - 0x42, 0xdb, 0xcd, 0x43, 0xd2, 0xbb, 0x50, 0x31, 0x6d, 0x68, 0xf1, 0x22, 0xc2, 0x48, 0x9c, 0x21, - 0xd4, 0x2c, 0x87, 0x6d, 0x42, 0xf5, 0x68, 0x96, 0xbe, 0xa3, 0xd8, 0xe3, 0x02, 0xbf, 0x47, 0x16, - 0x81, 0x67, 0x90, 0x41, 0xb0, 0x6b, 0x50, 0x3e, 0x9a, 0xf5, 0xbb, 0xe6, 0x62, 0x89, 0x27, 0x19, - 0x7e, 0xed, 0x56, 0x8d, 0x41, 0xce, 0xe7, 0xb0, 0x9c, 0x1f, 0x97, 0x16, 0xf6, 0x42, 0xae, 0xb0, - 0xa7, 0x47, 0x76, 0xf1, 0x55, 0x37, 0x8c, 0x8f, 0x00, 0xe8, 0xa5, 0xf7, 0x75, 0x6f, 0x26, 0x3f, - 0x86, 0x9a, 0x7d, 0x21, 0x66, 0x1f, 0x2c, 0xbc, 0x78, 0xb7, 0xd2, 0xe7, 0xe3, 0xb9, 0x67, 0x6f, - 0xe7, 0x3e, 0xf6, 0xa8, 0x67, 0x42, 0x75, 0xfd, 0xf1, 0xf8, 0x75, 0xa7, 0xbb, 0x0f, 0xad, 0x27, - 0x51, 0xf4, 0xef, 0x8d, 0xfd, 0x39, 0x54, 0xcd, 0x43, 0x35, 0x8e, 0x09, 0xd0, 0x02, 0xbb, 0x07, - 0xcc, 0xf4, 0xb1, 0x79, 0x93, 0xb8, 0x01, 0x20, 0x72, 0x8a, 0xf3, 0xd9, 0xcd, 0x25, 0xe4, 0xbc, - 0x01, 0xdc, 0x00, 0x36, 0x37, 0xa0, 0x66, 0xdf, 0x44, 0x59, 0x03, 0x2a, 0x4f, 0x0e, 0x86, 0xbd, - 0xc7, 0xed, 0x25, 0x56, 0x87, 0xf2, 0xfe, 0x60, 0xf8, 0xb8, 0x5d, 0x40, 0xea, 0x60, 0x70, 0xd0, - 0x6b, 0x17, 0x37, 0x6f, 0xc2, 0x72, 0xfe, 0x55, 0x94, 0x35, 0xa1, 0x36, 0xdc, 0x39, 0xe8, 0xee, - 0x0e, 0x7e, 0xd6, 0x5e, 0x62, 0xcb, 0x50, 0xef, 0x1f, 0x0c, 0x7b, 0x7b, 0x4f, 0x78, 0xaf, 0x5d, - 0xd8, 0xfc, 0x29, 0x34, 0xd2, 0x87, 0x22, 0xd4, 0xb0, 0xdb, 0x3f, 0xe8, 0xb6, 0x97, 0x18, 0x40, - 0x75, 0xd8, 0xdb, 0xe3, 0x3d, 0xd4, 0x5b, 0x83, 0xd2, 0x70, 0xb8, 0xdf, 0x2e, 0xe2, 0xac, 0x7b, - 0x3b, 0x7b, 0xfb, 0xbd, 0x76, 0x09, 0xc9, 0xc7, 0x8f, 0x0e, 0x1f, 0x0c, 0xdb, 0xe5, 0xcd, 0x8f, - 0xe0, 0xca, 0xc2, 0x13, 0x0a, 0x8d, 0xde, 0xdf, 0xe1, 0x3d, 0xd4, 0xd4, 0x84, 0xda, 0x21, 0xef, - 0x3f, 0xdd, 0x79, 0xdc, 0x6b, 0x17, 0x50, 0xf0, 0xf9, 0x60, 0xef, 0x61, 0xaf, 0xdb, 0x2e, 0xee, - 0x5e, 0xff, 0xea, 0xc5, 0x5a, 0xe1, 0x9b, 0x17, 0x6b, 0x85, 0x6f, 0x5f, 0xac, 0x15, 0xfe, 0xf6, - 0x62, 0xad, 0xf0, 0xe5, 0xf7, 0x6b, 0x4b, 0xdf, 0x7c, 0xbf, 0xb6, 0xf4, 0xed, 0xf7, 0x6b, 0x4b, - 0x47, 0x55, 0xfa, 0xab, 0xe3, 0xc3, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x4f, 0x06, 0xaa, - 0x2a, 0x19, 0x00, 0x00, + // 2577 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4f, 0x6f, 0x5b, 0xc7, + 0x11, 0x17, 0xff, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xa8, 0xae, 0xac, 0xbc, 0xa4, 0x81, + 0x2c, 0xdb, 0x12, 0xaa, 0x00, 0x71, 0x60, 0x04, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xc7, 0xa2, 0xb0, + 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xea, 0x41, 0xef, 0xbd, 0x7d, 0x78, 0x6f, 0x69, + 0x89, 0x3d, 0xf4, 0xd0, 0x53, 0x8f, 0x01, 0x0a, 0x14, 0xbd, 0x14, 0xfd, 0x12, 0x3d, 0xb6, 0xf7, + 0x00, 0xb9, 0xe4, 0xd0, 0x43, 0xd0, 0x43, 0x5a, 0x38, 0x97, 0x7e, 0x88, 0x16, 
0x28, 0x66, 0x76, + 0xdf, 0x1f, 0x52, 0x32, 0x6c, 0xb7, 0x45, 0x4f, 0x9c, 0x37, 0xf3, 0xdb, 0xd9, 0xd9, 0xd9, 0x99, + 0x9d, 0xd9, 0x25, 0x34, 0x64, 0x18, 0x6f, 0x85, 0x91, 0x54, 0x92, 0x15, 0xc3, 0xe3, 0xd5, 0x3b, + 0x13, 0x57, 0x9d, 0x4c, 0x8f, 0xb7, 0x1c, 0xe9, 0x6f, 0x4f, 0xe4, 0x44, 0x6e, 0x93, 0xe8, 0x78, + 0x3a, 0xa6, 0x2f, 0xfa, 0x20, 0x4a, 0x0f, 0xb1, 0xfe, 0x51, 0x84, 0xe2, 0x20, 0x64, 0xef, 0x42, + 0xd5, 0x0d, 0xc2, 0xa9, 0x8a, 0x3b, 0x85, 0xf5, 0xd2, 0x46, 0x73, 0xa7, 0xb1, 0x15, 0x1e, 0x6f, + 0xf5, 0x91, 0xc3, 0x8d, 0x80, 0xad, 0x43, 0x59, 0x9c, 0x0b, 0xa7, 0x53, 0x5c, 0x2f, 0x6c, 0x34, + 0x77, 0x00, 0x01, 0xbd, 0x73, 0xe1, 0x0c, 0xc2, 0x83, 0x25, 0x4e, 0x12, 0xf6, 0x01, 0x54, 0x63, + 0x39, 0x8d, 0x1c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x48, 0x51, 0xd3, + 0xd8, 0xf5, 0x44, 0xa7, 0x9c, 0x69, 0xba, 0xef, 0x7a, 0x1a, 0x43, 0x12, 0xf6, 0x1e, 0x54, 0x8e, + 0xa7, 0xae, 0x37, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x1e, 0x32, 0x08, 0xa3, 0x65, 0x08, 0xf2, + 0x45, 0x34, 0x11, 0x9d, 0x6a, 0x06, 0x7a, 0x88, 0x0c, 0x0d, 0x22, 0x19, 0xce, 0x35, 0x72, 0xc7, + 0xe3, 0x4e, 0x2d, 0x9b, 0xab, 0xeb, 0x8e, 0xc7, 0x7a, 0x2e, 0x94, 0xb0, 0x0d, 0xa8, 0x87, 0x9e, + 0xad, 0xc6, 0x32, 0xf2, 0x3b, 0x90, 0xd9, 0x7d, 0x64, 0x78, 0x3c, 0x95, 0xb2, 0xbb, 0xd0, 0x74, + 0x64, 0x10, 0xab, 0xc8, 0x76, 0x03, 0x15, 0x77, 0x9a, 0x04, 0x7e, 0x13, 0xc1, 0x5f, 0xc8, 0xe8, + 0x54, 0x44, 0xfb, 0x99, 0x90, 0xe7, 0x91, 0x7b, 0x65, 0x28, 0xca, 0xd0, 0xfa, 0x6d, 0x01, 0xea, + 0x89, 0x56, 0x66, 0xc1, 0xf2, 0x6e, 0xe4, 0x9c, 0xb8, 0x4a, 0x38, 0x6a, 0x1a, 0x89, 0x4e, 0x61, + 0xbd, 0xb0, 0xd1, 0xe0, 0x73, 0x3c, 0xd6, 0x82, 0xe2, 0x60, 0x48, 0xfe, 0x6e, 0xf0, 0xe2, 0x60, + 0xc8, 0x3a, 0x50, 0x7b, 0x62, 0x47, 0xae, 0x1d, 0x28, 0x72, 0x70, 0x83, 0x27, 0x9f, 0xec, 0x3a, + 0x34, 0x06, 0xc3, 0x27, 0x22, 0x8a, 0x5d, 0x19, 0x90, 0x5b, 0x1b, 0x3c, 0x63, 0xb0, 0x35, 0x80, + 0xc1, 0xf0, 0xbe, 0xb0, 0x51, 0x69, 0xdc, 0xa9, 0xac, 0x97, 0x36, 0x1a, 0x3c, 0xc7, 0xb1, 0x7e, + 0x09, 0x15, 0xda, 0x6a, 0xf6, 0x19, 0x54, 0x47, 0xee, 0x44, 0xc4, 0x4a, 0x9b, 0xb3, 0xb7, 0xf3, + 0xd5, 0x77, 0x37, 0x96, 0xfe, 0xfa, 0xdd, 0x8d, 0xcd, 0x5c, 0x4c, 0xc9, 0x50, 0x04, 0x8e, 0x0c, + 0x94, 0xed, 0x06, 0x22, 0x8a, 0xb7, 0x27, 0xf2, 0x8e, 0x1e, 0xb2, 0xd5, 0xa5, 0x1f, 0x6e, 0x34, + 0xb0, 0x9b, 0x50, 0x71, 0x83, 0x91, 0x38, 0x27, 0xfb, 0x4b, 0x7b, 0x57, 0x8d, 0xaa, 0xe6, 0x60, + 0xaa, 0xc2, 0xa9, 0xea, 0xa3, 0x88, 0x6b, 0x84, 0xf5, 0x75, 0x01, 0xaa, 0x3a, 0x94, 0xd8, 0x75, + 0x28, 0xfb, 0x42, 0xd9, 0x34, 0x7f, 0x73, 0xa7, 0xae, 0xb7, 0x54, 0xd9, 0x9c, 0xb8, 0x18, 0xa5, + 0xbe, 0x9c, 0xa2, 0xef, 0x8b, 0x59, 0x94, 0x3e, 0x44, 0x0e, 0x37, 0x02, 0xf6, 0x23, 0xa8, 0x05, + 0x42, 0x9d, 0xc9, 0xe8, 0x94, 0x7c, 0xd4, 0xd2, 0x61, 0x71, 0x28, 0xd4, 0x43, 0x39, 0x12, 0x3c, + 0x91, 0xb1, 0xdb, 0x50, 0x8f, 0x85, 0x33, 0x8d, 0x5c, 0x35, 0x23, 0x7f, 0xb5, 0x76, 0xda, 0x14, + 0xac, 0x86, 0x47, 0xe0, 0x14, 0xc1, 0x6e, 0x41, 0x23, 0x16, 0x4e, 0x24, 0x94, 0x08, 0x9e, 0x91, + 0xff, 0x9a, 0x3b, 0x2b, 0x06, 0x1e, 0x09, 0xd5, 0x0b, 0x9e, 0xf1, 0x4c, 0x6e, 0x7d, 0x5d, 0x84, + 0x32, 0xda, 0xcc, 0x18, 0x94, 0xed, 0x68, 0xa2, 0x33, 0xaa, 0xc1, 0x89, 0x66, 0x6d, 0x28, 0xa1, + 0x8e, 0x22, 0xb1, 0x90, 0x44, 0x8e, 0x73, 0x36, 0x32, 0x1b, 0x8a, 0x24, 0x8e, 0x9b, 0xc6, 0x22, + 0x32, 0xfb, 0x48, 0x34, 0xbb, 0x09, 0x8d, 0x30, 0x92, 0xe7, 0xb3, 0xa7, 0xda, 0x82, 0x2c, 0x4a, + 0x91, 0x89, 0x06, 0xd4, 0x43, 0x43, 0xb1, 0x4d, 0x00, 0x71, 0xae, 0x22, 0xfb, 0x40, 0xc6, 0x2a, + 0xee, 0x54, 0xc9, 0x5a, 0x8a, 0x7b, 0x64, 0xf4, 0x8f, 0x78, 0x4e, 0xca, 0x56, 0xa1, 0x7e, 0x22, + 0x63, 
0x15, 0xd8, 0xbe, 0xa0, 0x0c, 0x69, 0xf0, 0xf4, 0x9b, 0x59, 0x50, 0x9d, 0x7a, 0xae, 0xef, + 0xaa, 0x4e, 0x23, 0xd3, 0xf1, 0x98, 0x38, 0xdc, 0x48, 0x30, 0x8a, 0x9d, 0x49, 0x24, 0xa7, 0xe1, + 0x91, 0x1d, 0x89, 0x40, 0x51, 0xfe, 0x34, 0xf8, 0x1c, 0x8f, 0x7d, 0x02, 0xef, 0x44, 0xc2, 0x97, + 0xcf, 0x04, 0x6d, 0xd4, 0x50, 0x4d, 0x8f, 0x63, 0x8e, 0x8e, 0x8d, 0xdd, 0x67, 0x82, 0x72, 0xa8, + 0xce, 0x5f, 0x0c, 0xb0, 0x6e, 0x43, 0x55, 0xdb, 0x8d, 0x6e, 0x41, 0xca, 0x64, 0x0a, 0xd1, 0x98, + 0x21, 0xfd, 0xa3, 0x24, 0x43, 0xfa, 0x47, 0x56, 0x17, 0xaa, 0xda, 0x42, 0x44, 0x1f, 0xe2, 0xaa, + 0x0c, 0x1a, 0x69, 0xe4, 0x0d, 0xe5, 0x58, 0xe9, 0x88, 0xe4, 0x44, 0x93, 0x56, 0x3b, 0xd2, 0xfe, + 0x2f, 0x71, 0xa2, 0xad, 0x07, 0xd0, 0x48, 0x77, 0x96, 0xa6, 0xe8, 0x1a, 0x35, 0xc5, 0x7e, 0x17, + 0x07, 0x90, 0xbb, 0xf4, 0xa4, 0x44, 0xa3, 0x1b, 0x65, 0xa8, 0x5c, 0x19, 0xd8, 0x1e, 0x29, 0xaa, + 0xf3, 0xf4, 0xdb, 0xfa, 0x5d, 0x09, 0x2a, 0xb4, 0x30, 0xb6, 0x81, 0x19, 0x11, 0x4e, 0xf5, 0x0a, + 0x4a, 0x7b, 0xcc, 0x64, 0x04, 0x50, 0xee, 0xa5, 0x09, 0x81, 0x79, 0xb8, 0x8a, 0xd1, 0xe9, 0x09, + 0x47, 0xc9, 0xc8, 0xcc, 0x93, 0x7e, 0xe3, 0xfc, 0x23, 0xcc, 0x50, 0x1d, 0x30, 0x44, 0xb3, 0x5b, + 0x50, 0x95, 0x94, 0x56, 0x14, 0x33, 0x2f, 0x48, 0x36, 0x03, 0x41, 0xe5, 0x91, 0xb0, 0x47, 0x32, + 0xf0, 0x66, 0x14, 0x49, 0x75, 0x9e, 0x7e, 0x63, 0xa0, 0x53, 0x1e, 0x3d, 0x9a, 0x85, 0xfa, 0x58, + 0x6d, 0xe9, 0x40, 0x7f, 0x98, 0x30, 0x79, 0x26, 0xc7, 0x83, 0xf3, 0x91, 0x1f, 0x8e, 0xe3, 0x41, + 0xa8, 0x3a, 0x57, 0xb3, 0x90, 0x4c, 0x78, 0x3c, 0x95, 0x22, 0xd2, 0xb1, 0x9d, 0x13, 0x81, 0xc8, + 0x6b, 0x19, 0x72, 0xdf, 0xf0, 0x78, 0x2a, 0xcd, 0x32, 0x0d, 0xa1, 0x6f, 0x12, 0x34, 0x97, 0x69, + 0x88, 0xcd, 0xe4, 0x18, 0xa1, 0xc3, 0xe1, 0x01, 0x22, 0xdf, 0xca, 0x4e, 0x77, 0xcd, 0xe1, 0x46, + 0xa2, 0x57, 0x1b, 0x4f, 0x3d, 0xd5, 0xef, 0x76, 0xde, 0xd6, 0xae, 0x4c, 0xbe, 0xad, 0xb5, 0x6c, + 0x01, 0xe8, 0xd6, 0xd8, 0xfd, 0x85, 0x8e, 0x97, 0x12, 0x27, 0xda, 0xea, 0x43, 0x3d, 0x31, 0xf1, + 0x42, 0x18, 0xdc, 0x81, 0x5a, 0x7c, 0x62, 0x47, 0x6e, 0x30, 0xa1, 0x1d, 0x6a, 0xed, 0x5c, 0x4d, + 0x57, 0x34, 0xd4, 0x7c, 0xb4, 0x22, 0xc1, 0x58, 0x32, 0x09, 0xa9, 0xcb, 0x74, 0xb5, 0xa1, 0x34, + 0x75, 0x47, 0xa4, 0x67, 0x85, 0x23, 0x89, 0x9c, 0x89, 0xab, 0x83, 0x72, 0x85, 0x23, 0x89, 0xf6, + 0xf9, 0x72, 0xa4, 0x6b, 0xe6, 0x0a, 0x27, 0x7a, 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0xe7, 0x25, 0xbe, + 0xf9, 0xbf, 0xcc, 0xf6, 0x9b, 0x02, 0xd4, 0x93, 0x42, 0x8f, 0xe5, 0xc6, 0x1d, 0x89, 0x40, 0xb9, + 0x63, 0x57, 0x44, 0x66, 0xe2, 0x1c, 0x87, 0xdd, 0x81, 0x8a, 0xad, 0x54, 0x94, 0x1c, 0xe2, 0x6f, + 0xe7, 0xbb, 0x84, 0xad, 0x5d, 0x94, 0xf4, 0x02, 0x15, 0xcd, 0xb8, 0x46, 0xad, 0x7e, 0x0c, 0x90, + 0x31, 0xd1, 0xd6, 0x53, 0x31, 0x33, 0x5a, 0x91, 0x64, 0xd7, 0xa0, 0xf2, 0xcc, 0xf6, 0xa6, 0x49, + 0x46, 0xea, 0x8f, 0x7b, 0xc5, 0x8f, 0x0b, 0xd6, 0x9f, 0x8b, 0x50, 0x33, 0x5d, 0x03, 0xbb, 0x0d, + 0x35, 0xea, 0x1a, 0x8c, 0x45, 0x97, 0xa7, 0x5f, 0x02, 0x61, 0xdb, 0x69, 0x3b, 0x94, 0xb3, 0xd1, + 0xa8, 0xd2, 0x6d, 0x91, 0xb1, 0x31, 0x6b, 0x8e, 0x4a, 0x23, 0x31, 0x36, 0x7d, 0x4f, 0x8b, 0xba, + 0x0c, 0x31, 0x76, 0x03, 0x17, 0xfd, 0xc3, 0x51, 0xc4, 0x6e, 0x27, 0xab, 0x2e, 0x93, 0xc6, 0xb7, + 0xf2, 0x1a, 0x2f, 0x2e, 0xba, 0x0f, 0xcd, 0xdc, 0x34, 0x97, 0xac, 0xfa, 0xfd, 0xfc, 0xaa, 0xcd, + 0x94, 0xa4, 0x4e, 0x37, 0x6d, 0x99, 0x17, 0xfe, 0x0b, 0xff, 0x7d, 0x04, 0x90, 0xa9, 0x7c, 0xf5, + 0xe3, 0xcb, 0xfa, 0x53, 0x09, 0x60, 0x10, 0x62, 0x0d, 0x1c, 0xd9, 0x54, 0xb5, 0x97, 0xdd, 0x49, + 0x20, 0x23, 0xf1, 0x94, 0xd2, 0x9c, 0xc6, 0xd7, 0x79, 0x53, 0xf3, 0x28, 0x63, 0xd8, 0x2e, 0x34, + 0x47, 0x22, 0x76, 0x22, 0x97, 
0x02, 0xca, 0x38, 0xfd, 0x06, 0xae, 0x29, 0xd3, 0xb3, 0xd5, 0xcd, + 0x10, 0xda, 0x57, 0xf9, 0x31, 0x6c, 0x07, 0x96, 0xc5, 0x79, 0x28, 0x23, 0x65, 0x66, 0xd1, 0xcd, + 0xe5, 0x15, 0xdd, 0xa6, 0x22, 0x9f, 0x66, 0xe2, 0x4d, 0x91, 0x7d, 0x30, 0x1b, 0xca, 0x8e, 0x1d, + 0xc6, 0xa6, 0xa4, 0x77, 0x16, 0xe6, 0xdb, 0xb7, 0x43, 0xed, 0xb4, 0xbd, 0x0f, 0x71, 0xad, 0xbf, + 0xfa, 0xdb, 0x8d, 0x5b, 0xb9, 0x3e, 0xc8, 0x97, 0xc7, 0xb3, 0x6d, 0x8a, 0x97, 0x53, 0x57, 0x6d, + 0x4f, 0x95, 0xeb, 0x6d, 0xdb, 0xa1, 0x8b, 0xea, 0x70, 0x60, 0xbf, 0xcb, 0x49, 0x35, 0xfb, 0x18, + 0x5a, 0x61, 0x24, 0x27, 0x91, 0x88, 0xe3, 0xa7, 0x54, 0x15, 0x4d, 0xb7, 0xfa, 0x86, 0xa9, 0xde, + 0x24, 0xf9, 0x14, 0x05, 0x7c, 0x25, 0xcc, 0x7f, 0xae, 0xfe, 0x04, 0xda, 0x8b, 0x2b, 0x7e, 0x9d, + 0xdd, 0x5b, 0xbd, 0x0b, 0x8d, 0x74, 0x05, 0x2f, 0x1b, 0x58, 0xcf, 0x6f, 0xfb, 0x1f, 0x0b, 0x50, + 0xd5, 0xf9, 0xc8, 0xee, 0x42, 0xc3, 0x93, 0x8e, 0x8d, 0x06, 0x24, 0x37, 0x83, 0x77, 0xb2, 0x74, + 0xdd, 0xfa, 0x3c, 0x91, 0xe9, 0xfd, 0xc8, 0xb0, 0x18, 0x9e, 0x6e, 0x30, 0x96, 0x49, 0xfe, 0xb4, + 0xb2, 0x41, 0xfd, 0x60, 0x2c, 0xb9, 0x16, 0xae, 0x3e, 0x80, 0xd6, 0xbc, 0x8a, 0x4b, 0xec, 0x7c, + 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x66, 0xdf, 0x85, 0x46, 0xca, 0x67, 0x9b, 0x17, + 0x0d, 0x5f, 0xce, 0x8f, 0xcc, 0xd9, 0x6a, 0xfd, 0xba, 0x00, 0x90, 0xd9, 0x86, 0xe7, 0x1c, 0xde, + 0x41, 0x82, 0xac, 0x7b, 0x48, 0xbf, 0xa9, 0xf8, 0xda, 0xca, 0x26, 0x5b, 0x96, 0x39, 0xd1, 0x6c, + 0x0b, 0x60, 0x94, 0xe6, 0xfa, 0x0b, 0x4e, 0x80, 0x1c, 0x02, 0xf5, 0x7b, 0x76, 0x30, 0x99, 0xda, + 0x13, 0x61, 0x5a, 0xbc, 0xf4, 0xdb, 0x1a, 0x40, 0x3d, 0xb1, 0x90, 0xad, 0x43, 0x33, 0x36, 0x56, + 0x61, 0x1b, 0x8d, 0xa6, 0x54, 0x78, 0x9e, 0x85, 0xed, 0x70, 0x64, 0x07, 0x13, 0x31, 0xd7, 0x0e, + 0x73, 0xe4, 0x70, 0x23, 0xb0, 0xbe, 0x80, 0x0a, 0x31, 0x30, 0x7b, 0x63, 0x65, 0x47, 0xca, 0x74, + 0xd6, 0xba, 0x79, 0x94, 0x31, 0x99, 0xb4, 0x57, 0xc6, 0xf8, 0xe6, 0x1a, 0xc0, 0xde, 0xc7, 0x16, + 0x75, 0x64, 0xdc, 0x7d, 0x19, 0x0e, 0xc5, 0xd6, 0x27, 0x50, 0x4f, 0xd8, 0xe8, 0x15, 0xcf, 0x0d, + 0x84, 0x31, 0x91, 0x68, 0xbc, 0x91, 0x38, 0x27, 0x76, 0x64, 0x3b, 0x4a, 0xe8, 0x1e, 0xa6, 0xc2, + 0x33, 0x86, 0xf5, 0x1e, 0x34, 0x73, 0x49, 0x89, 0xb1, 0xf8, 0x84, 0xf6, 0x58, 0x1f, 0x0d, 0xfa, + 0xc3, 0xfa, 0x14, 0x56, 0xe6, 0x12, 0x04, 0x2b, 0x99, 0x3b, 0x4a, 0x2a, 0x99, 0xae, 0x52, 0x17, + 0x5a, 0x31, 0x06, 0xe5, 0x33, 0x61, 0x9f, 0x9a, 0x36, 0x8c, 0x68, 0xeb, 0x0f, 0x78, 0xf1, 0x4a, + 0xda, 0xe3, 0x1f, 0x02, 0x9c, 0x28, 0x15, 0x3e, 0xa5, 0x7e, 0xd9, 0x28, 0x6b, 0x20, 0x87, 0x10, + 0xec, 0x06, 0x34, 0xf1, 0x23, 0x36, 0x72, 0xad, 0x9a, 0x46, 0xc4, 0x1a, 0xf0, 0x03, 0x68, 0x8c, + 0xd3, 0xe1, 0x25, 0x13, 0x1f, 0xc9, 0xe8, 0x77, 0xa0, 0x1e, 0x48, 0x23, 0xd3, 0x7b, 0x5b, 0x0b, + 0x64, 0x3a, 0xce, 0xf6, 0x3c, 0x23, 0xab, 0xe8, 0x71, 0xb6, 0xe7, 0x91, 0xd0, 0xba, 0x05, 0x6f, + 0x5c, 0xb8, 0x42, 0xb2, 0xb7, 0xa0, 0x3a, 0x76, 0x3d, 0x45, 0x15, 0x0b, 0xaf, 0x0b, 0xe6, 0xcb, + 0xfa, 0x57, 0x01, 0x20, 0x8b, 0x2d, 0x4c, 0x19, 0x2c, 0x3d, 0x88, 0x59, 0xd6, 0xa5, 0xc6, 0x83, + 0xba, 0x6f, 0x0e, 0x31, 0x13, 0x19, 0xd7, 0xe7, 0xe3, 0x71, 0x2b, 0x39, 0xe3, 0xf4, 0xf1, 0xb6, + 0x63, 0x8e, 0xb7, 0xd7, 0xb9, 0xe6, 0xa5, 0x33, 0x50, 0x17, 0x96, 0xbf, 0xf5, 0x43, 0x96, 0xeb, + 0xdc, 0x48, 0x56, 0x1f, 0xc0, 0xca, 0xdc, 0x94, 0xaf, 0x58, 0xd0, 0xb2, 0xc3, 0x38, 0x9f, 0xe8, + 0x3b, 0x50, 0xd5, 0xcf, 0x05, 0x6c, 0x03, 0x6a, 0xb6, 0xa3, 0x73, 0x3c, 0x77, 0xce, 0xa0, 0x70, + 0x97, 0xd8, 0x3c, 0x11, 0x5b, 0x7f, 0x29, 0x02, 0x64, 0xfc, 0xd7, 0x68, 0xc5, 0xef, 0x41, 0x2b, + 0x16, 0x8e, 0x0c, 0x46, 0x76, 0x34, 0x23, 0xa9, 0xb9, 
0xcf, 0x5e, 0x36, 0x64, 0x01, 0x99, 0x6b, + 0xcb, 0x4b, 0x2f, 0x6f, 0xcb, 0x37, 0xa0, 0xec, 0xc8, 0x70, 0x66, 0xea, 0x16, 0x9b, 0x5f, 0xc8, + 0xbe, 0x0c, 0x67, 0x07, 0x4b, 0x9c, 0x10, 0x6c, 0x0b, 0xaa, 0xfe, 0x29, 0x3d, 0xa0, 0xe8, 0x8b, + 0xe0, 0xb5, 0x79, 0xec, 0xc3, 0x53, 0xa4, 0x0f, 0x96, 0xb8, 0x41, 0xb1, 0x5b, 0x50, 0xf1, 0x4f, + 0x47, 0x6e, 0x64, 0x2a, 0xcf, 0xd5, 0x45, 0x78, 0xd7, 0x8d, 0xe8, 0xbd, 0x04, 0x31, 0xcc, 0x82, + 0x62, 0xe4, 0x9b, 0xd7, 0x92, 0xf6, 0x82, 0x37, 0xfd, 0x83, 0x25, 0x5e, 0x8c, 0xfc, 0xbd, 0x3a, + 0x54, 0xb5, 0x5f, 0xad, 0x7f, 0x96, 0xa0, 0x35, 0x6f, 0x25, 0xee, 0x6c, 0x1c, 0x39, 0xc9, 0xce, + 0xc6, 0x91, 0x93, 0xde, 0x58, 0x8a, 0xb9, 0x1b, 0x8b, 0x05, 0x15, 0x79, 0x16, 0x88, 0x28, 0xff, + 0x52, 0xb4, 0x7f, 0x22, 0xcf, 0x02, 0xec, 0x9a, 0xb5, 0x68, 0xae, 0x09, 0xad, 0x98, 0x26, 0xf4, + 0x7d, 0x58, 0x19, 0x4b, 0xcf, 0x93, 0x67, 0xc3, 0x99, 0xef, 0xb9, 0xc1, 0xa9, 0xe9, 0x44, 0xe7, + 0x99, 0x6c, 0x03, 0xae, 0x8c, 0xdc, 0x08, 0xcd, 0xd9, 0x97, 0x81, 0x12, 0x01, 0xdd, 0x83, 0x11, + 0xb7, 0xc8, 0x66, 0x9f, 0xc1, 0xba, 0xad, 0x94, 0xf0, 0x43, 0xf5, 0x38, 0x08, 0x6d, 0xe7, 0xb4, + 0x2b, 0x1d, 0xca, 0x42, 0x3f, 0xb4, 0x95, 0x7b, 0xec, 0x7a, 0xae, 0x9a, 0x91, 0x33, 0xea, 0xfc, + 0xa5, 0x38, 0xf6, 0x01, 0xb4, 0x9c, 0x48, 0xd8, 0x4a, 0x74, 0x45, 0xac, 0x8e, 0x6c, 0x75, 0xd2, + 0xa9, 0xd3, 0xc8, 0x05, 0x2e, 0xae, 0xc1, 0x46, 0x6b, 0xbf, 0x70, 0xbd, 0x91, 0x83, 0x77, 0xcf, + 0x86, 0x5e, 0xc3, 0x1c, 0x93, 0x6d, 0x01, 0x23, 0x46, 0xcf, 0x0f, 0xd5, 0x2c, 0x85, 0x02, 0x41, + 0x2f, 0x91, 0xe0, 0x81, 0xab, 0x5c, 0x5f, 0xc4, 0xca, 0xf6, 0x43, 0xba, 0x56, 0x97, 0x78, 0xc6, + 0x60, 0x37, 0xa1, 0xed, 0x06, 0x8e, 0x37, 0x1d, 0x89, 0xa7, 0x21, 0x2e, 0x24, 0x0a, 0xe2, 0xce, + 0x32, 0x9d, 0x2a, 0x57, 0x0c, 0xff, 0xc8, 0xb0, 0x11, 0x2a, 0xce, 0x17, 0xa0, 0x2b, 0x1a, 0x6a, + 0xf8, 0x09, 0xd4, 0xfa, 0xb2, 0x00, 0xed, 0xc5, 0xc0, 0xc3, 0x6d, 0x0b, 0x71, 0xf1, 0xe6, 0xe6, + 0x8d, 0x74, 0xba, 0x95, 0xc5, 0xdc, 0x56, 0x26, 0xb5, 0xb4, 0x94, 0xab, 0xa5, 0x69, 0x58, 0x94, + 0x5f, 0x1c, 0x16, 0x73, 0x0b, 0xad, 0x2c, 0x2c, 0xd4, 0xfa, 0x7d, 0x01, 0xae, 0x2c, 0x04, 0xf7, + 0x2b, 0x5b, 0xb4, 0x0e, 0x4d, 0xdf, 0x3e, 0x15, 0xfa, 0xdd, 0x22, 0x36, 0x25, 0x24, 0xcf, 0xfa, + 0x1f, 0xd8, 0x17, 0xc0, 0x72, 0x3e, 0xa3, 0x2e, 0xb5, 0x2d, 0x09, 0x90, 0x43, 0xa9, 0xee, 0xcb, + 0xa9, 0xa9, 0xc5, 0x49, 0x80, 0x24, 0xcc, 0x8b, 0x61, 0x54, 0xba, 0x24, 0x8c, 0xac, 0x43, 0xa8, + 0x27, 0x06, 0xb2, 0x1b, 0xe6, 0x61, 0xa9, 0x90, 0xbd, 0x97, 0x3e, 0x8e, 0x45, 0x84, 0xb6, 0xeb, + 0x57, 0xa6, 0x77, 0xa1, 0xa2, 0x7b, 0xd4, 0xe2, 0x45, 0x84, 0x96, 0x58, 0x43, 0xa8, 0x19, 0x0e, + 0xdb, 0x84, 0xea, 0xf1, 0x2c, 0x7d, 0x64, 0x31, 0xc7, 0x05, 0x7e, 0x8f, 0x0c, 0x02, 0xcf, 0x20, + 0x8d, 0x60, 0xd7, 0xa0, 0x7c, 0x3c, 0xeb, 0x77, 0xf5, 0xad, 0x13, 0x4f, 0x32, 0xfc, 0xda, 0xab, + 0x6a, 0x83, 0xac, 0xcf, 0x61, 0x39, 0x3f, 0x2e, 0x2d, 0xec, 0x85, 0x5c, 0x61, 0x4f, 0x8f, 0xec, + 0xe2, 0xcb, 0xae, 0x1f, 0x1f, 0x01, 0xd0, 0x33, 0xf0, 0xeb, 0x5e, 0x5b, 0x7e, 0x0c, 0x35, 0xf3, + 0x7c, 0xcc, 0x3e, 0x58, 0x78, 0x0e, 0x6f, 0xa5, 0x6f, 0xcb, 0x73, 0x6f, 0xe2, 0xd6, 0x3d, 0x6c, + 0x60, 0xcf, 0x44, 0xd4, 0x75, 0xc7, 0xe3, 0xd7, 0x9d, 0xee, 0x1e, 0xb4, 0x1e, 0x87, 0xe1, 0x7f, + 0x36, 0xf6, 0xe7, 0x50, 0xd5, 0xaf, 0xd8, 0x38, 0xc6, 0x43, 0x0b, 0xcc, 0x1e, 0x30, 0xdd, 0xe4, + 0xe6, 0x4d, 0xe2, 0x1a, 0x80, 0xc8, 0x29, 0xce, 0x67, 0x36, 0x97, 0x90, 0xf3, 0x06, 0x70, 0x0d, + 0xd8, 0xdc, 0x80, 0x9a, 0x79, 0x30, 0x65, 0x0d, 0xa8, 0x3c, 0x3e, 0x1c, 0xf6, 0x1e, 0xb5, 0x97, + 0x58, 0x1d, 0xca, 0x07, 0x83, 0xe1, 0xa3, 0x76, 0x01, 0xa9, 0xc3, 0xc1, 0x61, 
0xaf, 0x5d, 0xdc, + 0xbc, 0x09, 0xcb, 0xf9, 0x27, 0x53, 0xd6, 0x84, 0xda, 0x70, 0xf7, 0xb0, 0xbb, 0x37, 0xf8, 0x59, + 0x7b, 0x89, 0x2d, 0x43, 0xbd, 0x7f, 0x38, 0xec, 0xed, 0x3f, 0xe6, 0xbd, 0x76, 0x61, 0xf3, 0xa7, + 0xd0, 0x48, 0x5f, 0x91, 0x50, 0xc3, 0x5e, 0xff, 0xb0, 0xdb, 0x5e, 0x62, 0x00, 0xd5, 0x61, 0x6f, + 0x9f, 0xf7, 0x50, 0x6f, 0x0d, 0x4a, 0xc3, 0xe1, 0x41, 0xbb, 0x88, 0xb3, 0xee, 0xef, 0xee, 0x1f, + 0xf4, 0xda, 0x25, 0x24, 0x1f, 0x3d, 0x3c, 0xba, 0x3f, 0x6c, 0x97, 0x37, 0x3f, 0x82, 0x2b, 0x0b, + 0xef, 0x2b, 0x34, 0xfa, 0x60, 0x97, 0xf7, 0x50, 0x53, 0x13, 0x6a, 0x47, 0xbc, 0xff, 0x64, 0xf7, + 0x51, 0xaf, 0x5d, 0x40, 0xc1, 0xe7, 0x83, 0xfd, 0x07, 0xbd, 0x6e, 0xbb, 0xb8, 0x77, 0xfd, 0xab, + 0xe7, 0x6b, 0x85, 0x6f, 0x9e, 0xaf, 0x15, 0xbe, 0x7d, 0xbe, 0x56, 0xf8, 0xfb, 0xf3, 0xb5, 0xc2, + 0x97, 0xdf, 0xaf, 0x2d, 0x7d, 0xf3, 0xfd, 0xda, 0xd2, 0xb7, 0xdf, 0xaf, 0x2d, 0x1d, 0x57, 0xe9, + 0x7f, 0x90, 0x0f, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x3c, 0x38, 0x7a, 0x47, 0x19, 0x00, + 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -4323,6 +4332,13 @@ func (m *SourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Language) > 0 { + i -= len(m.Language) + copy(dAtA[i:], m.Language) + i = encodeVarintOps(dAtA, i, uint64(len(m.Language))) + i-- + dAtA[i] = 0x22 + } if m.Definition != nil { { size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) @@ -6097,6 +6113,10 @@ func (m *SourceInfo) Size() (n int) { l = m.Definition.Size() n += 1 + l + sovOps(uint64(l)) } + l = len(m.Language) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } return n } @@ -10512,6 +10532,38 @@ func (m *SourceInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Language", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Language = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index 87cb771902..4788c093b7 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -228,6 +228,7 @@ message SourceInfo { string filename = 1; bytes data = 2; Definition definition = 3; + string language = 4; } // Location defines list of areas in to source file diff --git a/vendor/github.com/moby/buildkit/solver/result/attestation.go b/vendor/github.com/moby/buildkit/solver/result/attestation.go index 77af74da19..2fee278240 100644 --- a/vendor/github.com/moby/buildkit/solver/result/attestation.go +++ b/vendor/github.com/moby/buildkit/solver/result/attestation.go @@ -1,8 +1,6 @@ package result import ( - "reflect" - pb "github.com/moby/buildkit/frontend/gateway/pb" digest "github.com/opencontainers/go-digest" ) @@ -58,9 +56,11 @@ func FromDigestMap(m map[string]string) []digest.Digest { return ds } -func ConvertAttestation[U any, V any](a *Attestation[U], fn func(U) (V, error)) 
(*Attestation[V], error) { +func ConvertAttestation[U comparable, V comparable](a *Attestation[U], fn func(U) (V, error)) (*Attestation[V], error) { + var zero U + var ref V - if reflect.ValueOf(a.Ref).IsValid() { + if a.Ref != zero { var err error ref, err = fn(a.Ref) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/result/result.go b/vendor/github.com/moby/buildkit/solver/result/result.go index d5fe2d03cf..cfcfe9dcbd 100644 --- a/vendor/github.com/moby/buildkit/solver/result/result.go +++ b/vendor/github.com/moby/buildkit/solver/result/result.go @@ -1,13 +1,12 @@ package result import ( - "reflect" "sync" "github.com/pkg/errors" ) -type Result[T any] struct { +type Result[T comparable] struct { mu sync.Mutex Ref T Refs map[string]T @@ -50,7 +49,8 @@ func (r *Result[T]) SingleRef() (T, error) { r.mu.Lock() defer r.mu.Unlock() - if r.Refs != nil && !reflect.ValueOf(r.Ref).IsValid() { + var zero T + if r.Refs != nil && r.Ref == zero { var t T return t, errors.Errorf("invalid map result") } @@ -77,11 +77,12 @@ func (r *Result[T]) FindRef(key string) (T, bool) { } func (r *Result[T]) EachRef(fn func(T) error) (err error) { - if reflect.ValueOf(r.Ref).IsValid() { + var zero T + if r.Ref != zero { err = fn(r.Ref) } for _, r := range r.Refs { - if reflect.ValueOf(r).IsValid() { + if r != zero { if err1 := fn(r); err1 != nil && err == nil { err = err1 } @@ -89,7 +90,7 @@ func (r *Result[T]) EachRef(fn func(T) error) (err error) { } for _, as := range r.Attestations { for _, a := range as { - if reflect.ValueOf(a.Ref).IsValid() { + if a.Ref != zero { if err1 := fn(a.Ref); err1 != nil && err == nil { err = err1 } @@ -102,8 +103,12 @@ func (r *Result[T]) EachRef(fn func(T) error) (err error) { // EachRef iterates over references in both a and b. 
// a and b are assumed to be of the same size and map their references // to the same set of keys -func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { - if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() { +func EachRef[U comparable, V comparable](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { + var ( + zeroU U + zeroV V + ) + if a.Ref != zeroU && b.Ref != zeroV { err = fn(a.Ref, b.Ref) } for k, r := range a.Refs { @@ -111,7 +116,7 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err if !ok { continue } - if reflect.ValueOf(r).IsValid() && reflect.ValueOf(r2).IsValid() { + if r != zeroU && r2 != zeroV { if err1 := fn(r, r2); err1 != nil && err == nil { err = err1 } @@ -127,7 +132,7 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err break } att2 := atts2[i] - if reflect.ValueOf(att.Ref).IsValid() && reflect.ValueOf(att2.Ref).IsValid() { + if att.Ref != zeroU && att2.Ref != zeroV { if err1 := fn(att.Ref, att2.Ref); err1 != nil && err == nil { err = err1 } @@ -137,11 +142,13 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err return err } -func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { +func ConvertResult[U comparable, V comparable](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { + var zero U + r2 := &Result[V]{} var err error - if reflect.ValueOf(r.Ref).IsValid() { + if r.Ref != zero { r2.Ref, err = fn(r.Ref) if err != nil { return nil, err @@ -152,7 +159,7 @@ func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V r2.Refs = map[string]V{} } for k, r := range r.Refs { - if !reflect.ValueOf(r).IsValid() { + if r == zero { continue } r2.Refs[k], err = fn(r) diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go index 6635daef0e..01b344a3af 100644 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -241,7 +241,7 @@ type CacheManager interface { // Query searches for cache paths from one cache key to the output of a // possible match. Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) - Records(ck *CacheKey) ([]*CacheRecord, error) + Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) // Load loads a cache record into a result reference. 
Load(ctx context.Context, rec *CacheRecord) (Result, error) diff --git a/vendor/github.com/moby/buildkit/source/containerimage/pull.go b/vendor/github.com/moby/buildkit/source/containerimage/pull.go index 509d2a9946..8792111aaa 100644 --- a/vendor/github.com/moby/buildkit/source/containerimage/pull.go +++ b/vendor/github.com/moby/buildkit/source/containerimage/pull.go @@ -60,9 +60,15 @@ type SourceOpt struct { LeaseManager leases.Manager } +type resolveImageResult struct { + ref string + dgst digest.Digest + dt []byte +} + type Source struct { SourceOpt - g flightcontrol.Group + g flightcontrol.Group[*resolveImageResult] } var _ source.Source = &Source{} @@ -82,12 +88,7 @@ func (is *Source) ID() string { return srctypes.DockerImageScheme } -func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { - type t struct { - dgst digest.Digest - dt []byte - } - var typed *t +func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { key := ref if platform := opt.Platform; platform != nil { key += platforms.Format(*platform) @@ -102,7 +103,7 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re case ResolverTypeRegistry: rm, err = source.ParseImageResolveMode(opt.ResolveMode) if err != nil { - return "", nil, err + return "", "", nil, err } rslvr = resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g).WithImageStore(is.ImageStore, rm) case ResolverTypeOCILayout: @@ -110,18 +111,17 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re rslvr = getOCILayoutResolver(opt.Store, sm, g) } key += rm.String() - res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { - dgst, dt, err := imageutil.Config(ctx, ref, rslvr, is.ContentStore, is.LeaseManager, opt.Platform) + res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveImageResult, error) { + newRef, dgst, dt, err := imageutil.Config(ctx, ref, rslvr, is.ContentStore, is.LeaseManager, opt.Platform, opt.SourcePolicies) if err != nil { return nil, err } - return &t{dgst: dgst, dt: dt}, nil + return &resolveImageResult{dgst: dgst, dt: dt, ref: newRef}, nil }) if err != nil { - return "", nil, err + return "", "", nil, err } - typed = res.(*t) - return typed.dgst, typed.dt, nil + return res.ref, res.dgst, res.dt, nil } func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) { @@ -205,7 +205,7 @@ type puller struct { ResolverType store llb.ResolveImageConfigOptStore - g flightcontrol.Group + g flightcontrol.Group[struct{}] cacheKeyErr error cacheKeyDone bool releaseTmpLeases func(context.Context) error @@ -255,9 +255,9 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach // be canceled before the progress output is complete progressFactory := progress.FromContext(ctx) - _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { if p.cacheKeyErr != nil || p.cacheKeyDone { - return nil, p.cacheKeyErr + return struct{}{}, p.cacheKeyErr } defer func() { if !errdefs.IsCanceled(ctx, err) { @@ -266,7 +266,7 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach }() ctx, done, err := 
leaseutil.WithLease(ctx, p.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { - return nil, err + return struct{}{}, err } p.releaseTmpLeases = done defer imageutil.AddLease(done) @@ -278,12 +278,12 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach p.manifest, err = p.PullManifests(ctx, getResolver) if err != nil { - return nil, err + return struct{}{}, err } if ll := p.layerLimit; ll != nil { if *ll > len(p.manifest.Descriptors) { - return nil, errors.Errorf("layer limit %d is greater than the number of layers in the image %d", *ll, len(p.manifest.Descriptors)) + return struct{}{}, errors.Errorf("layer limit %d is greater than the number of layers in the image %d", *ll, len(p.manifest.Descriptors)) } p.manifest.Descriptors = p.manifest.Descriptors[:*ll] } @@ -320,21 +320,21 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach desc := p.manifest.MainManifestDesc k, err := mainManifestKey(ctx, desc, p.Platform, p.layerLimit) if err != nil { - return nil, err + return struct{}{}, err } p.manifestKey = k.String() dt, err := content.ReadBlob(ctx, p.ContentStore, p.manifest.ConfigDesc) if err != nil { - return nil, err + return struct{}{}, err } ck, err := cacheKeyFromConfig(dt, p.layerLimit) if err != nil { - return nil, err + return struct{}{}, err } p.configKey = ck.String() p.cacheKeyDone = true - return nil, nil + return struct{}{}, nil }) if err != nil { return "", "", nil, false, err diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go index dd35fe55f7..fdc1b50028 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -671,13 +671,14 @@ func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...strin // "GIT_TRACE=1", "GIT_CONFIG_NOSYSTEM=1", // Disable reading from system gitconfig. "HOME=/dev/null", // Disable reading from user gitconfig. + "LC_ALL=C", // Ensure consistent output. } if sshAuthSock != "" { cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock) } // remote git commands spawn helper processes that inherit FDs and don't // handle parent death signal so exec.CommandContext can't be used - err := runProcessGroup(ctx, cmd) + err := runWithStandardUmask(ctx, cmd) if err != nil { if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") { if newArgs := argsNoDepth(args); len(args) > len(newArgs) { diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go index cb49917573..142ae56091 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -5,80 +5,43 @@ package git import ( "context" - "os" "os/exec" - "os/signal" + "runtime" "syscall" "time" - "github.com/docker/docker/pkg/reexec" "golang.org/x/sys/unix" ) -const ( - gitCmd = "umask-git" -) +func runWithStandardUmask(ctx context.Context, cmd *exec.Cmd) error { + errCh := make(chan error) -func init() { - reexec.Register(gitCmd, gitMain) + go func() { + defer close(errCh) + runtime.LockOSThread() + + if err := unshareAndRun(ctx, cmd); err != nil { + errCh <- err + } + }() + + return <-errCh } -func gitMain() { - // Need standard user umask for git process. - unix.Umask(0022) +// unshareAndRun needs to be called in a locked thread. 
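A rough, Linux-only sketch of the mechanism relied on here, outside of buildkit: after runtime.LockOSThread and Unshare(CLONE_FS), the umask (together with cwd and root) becomes private to that OS thread, so changing it for the child process cannot leak into the rest of the daemon. The sh invocation below is only for demonstration.

package main

import (
	"fmt"
	"os/exec"
	"runtime"
	"syscall"
)

func main() {
	done := make(chan error, 1)
	go func() {
		// Pin the goroutine to its OS thread; after CLONE_FS the umask
		// below is private to this thread and to children forked from it.
		runtime.LockOSThread()
		if err := syscall.Unshare(syscall.CLONE_FS); err != nil {
			done <- err
			return
		}
		syscall.Umask(0022)
		// The child inherits the thread-local umask at fork time.
		out, err := exec.Command("sh", "-c", "umask").Output()
		fmt.Printf("child sees umask: %s", out)
		// The thread is deliberately left locked so Go retires it instead
		// of reusing it for other goroutines.
		done <- err
	}()
	if err := <-done; err != nil {
		fmt.Println("error:", err)
	}
}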
+func unshareAndRun(ctx context.Context, cmd *exec.Cmd) error { + if err := syscall.Unshare(syscall.CLONE_FS); err != nil { + return err + } + syscall.Umask(0022) + return runProcessGroup(ctx, cmd) +} - // Reexec git command - cmd := exec.Command(os.Args[1], os.Args[2:]...) //nolint:gosec // reexec +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { cmd.SysProcAttr = &unix.SysProcAttr{ Setpgid: true, Pdeathsig: unix.SIGTERM, } - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin - - // Forward all signals - sigc := make(chan os.Signal, 1) - done := make(chan struct{}) - signal.Notify(sigc) - go func() { - for { - select { - case sig := <-sigc: - if cmd.Process == nil { - continue - } - switch sig { - case unix.SIGINT, unix.SIGTERM, unix.SIGKILL: - _ = unix.Kill(-cmd.Process.Pid, sig.(unix.Signal)) - default: - _ = cmd.Process.Signal(sig) - } - case <-done: - return - } - } - }() - - err := cmd.Run() - close(done) - if err != nil { - if exiterr, ok := err.(*exec.ExitError); ok { - switch status := exiterr.Sys().(type) { - case unix.WaitStatus: - os.Exit(status.ExitStatus()) - case syscall.WaitStatus: - os.Exit(status.ExitStatus()) - } - } - os.Exit(1) - } - os.Exit(0) -} - -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { - cmd.Path = reexec.Self() - cmd.Args = append([]string{gitCmd}, cmd.Args...) if err := cmd.Start(); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go index a1952ecb0c..8c8a1d3dcf 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go @@ -8,7 +8,7 @@ import ( "os/exec" ) -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { +func runWithStandardUmask(ctx context.Context, cmd *exec.Cmd) error { if err := cmd.Start(); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go index 968c635651..9fde3cdee7 100644 --- a/vendor/github.com/moby/buildkit/source/http/httpsource.go +++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go @@ -178,6 +178,9 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde // manual ETag value comparison. if len(m) > 0 { req.Method = "HEAD" + // we need to add accept-encoding header manually because stdlib only adds it to GET requests + // some servers will return different etags if Accept-Encoding header is different + req.Header.Add("Accept-Encoding", "gzip") resp, err := client.Do(req) if err == nil { if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotModified { @@ -202,6 +205,10 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde resp.Body.Close() } req.Method = "GET" + // Unset explicit Accept-Encoding for GET, otherwise the go http library will not + // transparently decompress the response body when it is gzipped. It will still add + // this header implicitly when the request is made though. 
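A quick way to observe the stdlib behavior that the Accept-Encoding comments above compensate for (the httptest server is purely illustrative): the transport injects Accept-Encoding: gzip for GET but not for HEAD, so without pinning the header the two requests may be served different ETag variants.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Echo back the Accept-Encoding header the server actually received.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Seen-Accept-Encoding", r.Header.Get("Accept-Encoding"))
	}))
	defer srv.Close()

	for _, method := range []string{http.MethodGet, http.MethodHead} {
		req, err := http.NewRequest(method, srv.URL, nil)
		if err != nil {
			panic(err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		// Typically prints "gzip" for GET and "" for HEAD.
		fmt.Printf("%-4s -> server saw Accept-Encoding=%q\n", method, resp.Header.Get("X-Seen-Accept-Encoding"))
	}
}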
+ req.Header.Del("Accept-Encoding") } resp, err := client.Do(req) @@ -392,6 +399,9 @@ func (hs *httpSourceHandler) Snapshot(ctx context.Context, g session.Group) (cac if err != nil { return nil, err } + defer func() { + _ = resp.Body.Close() + }() ref, dgst, err := hs.save(ctx, resp, g) if err != nil { diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/engine.go b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go index 829e851065..8515b276a4 100644 --- a/vendor/github.com/moby/buildkit/sourcepolicy/engine.go +++ b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go @@ -7,6 +7,7 @@ import ( spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -112,12 +113,20 @@ func (e *Engine) evaluatePolicies(ctx context.Context, srcOp *pb.SourceOp) (bool // // For Allow/Deny rules, the last matching rule wins. // E.g. `ALLOW foo; DENY foo` will deny `foo`, `DENY foo; ALLOW foo` will allow `foo`. -func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (bool, error) { +func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (retMut bool, retErr error) { ident := srcOp.GetIdentifier() - ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithFields(map[string]interface{}{ - "ref": ident, - })) + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("ref", ident)) + defer func() { + if retMut || retErr != nil { + bklog.G(ctx).WithFields( + logrus.Fields{ + "mutated": retMut, + "updated": srcOp.GetIdentifier(), + logrus.ErrorKey: retErr, + }).Debug("Evaluated source policy") + } + }() var deny bool for _, rule := range pol.Rules { diff --git a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile index 9f8e59d9db..2b24b230b3 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile +++ b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile @@ -52,7 +52,7 @@ FROM base AS exit-mips64 COPY fixtures/exit.mips64.s . 
RUN mips64-linux-gnuabi64-as --noexecstack -o exit.o exit.mips64.s && mips64-linux-gnuabi64-ld -o exit -s exit.o -FROM golang:1.19-alpine AS generate +FROM golang:1.20-alpine AS generate WORKDIR /src COPY --from=exit-amd64 /src/exit amd64 COPY --from=exit-386 /src/exit 386 diff --git a/vendor/github.com/moby/buildkit/util/archutil/detect.go b/vendor/github.com/moby/buildkit/util/archutil/detect.go index 3184f9e548..7826441271 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/detect.go +++ b/vendor/github.com/moby/buildkit/util/archutil/detect.go @@ -6,8 +6,8 @@ import ( "sync" "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/util/bklog" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) var mu sync.Mutex @@ -181,10 +181,10 @@ func amd64vector(v string) (out []string) { func printPlatformWarning(p ocispecs.Platform, err error) { if strings.Contains(err.Error(), "exec format error") { - logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", platforms.Format(p)) + bklog.L.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", platforms.Format(p)) } else if strings.Contains(err.Error(), "no such file or directory") { - logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", platforms.Format(p)) + bklog.L.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", platforms.Format(p)) } else { - logrus.Warnf("platforms %s cannot pass the validation: %s", platforms.Format(p), err.Error()) + bklog.L.Warnf("platforms %s cannot pass the validation: %s", platforms.Format(p), err.Error()) } } diff --git a/vendor/github.com/moby/buildkit/util/attestation/types.go b/vendor/github.com/moby/buildkit/util/attestation/types.go index 35f4404cd6..accccd307e 100644 --- a/vendor/github.com/moby/buildkit/util/attestation/types.go +++ b/vendor/github.com/moby/buildkit/util/attestation/types.go @@ -1,8 +1,6 @@ package attestation const ( - MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json" - DockerAnnotationReferenceType = "vnd.docker.reference.type" DockerAnnotationReferenceDigest = "vnd.docker.reference.digest" DockerAnnotationReferenceDescription = "vnd.docker.reference.description" diff --git a/vendor/github.com/moby/buildkit/util/bklog/log.go b/vendor/github.com/moby/buildkit/util/bklog/log.go index d7f202210d..7d0b1d90df 100644 --- a/vendor/github.com/moby/buildkit/util/bklog/log.go +++ b/vendor/github.com/moby/buildkit/util/bklog/log.go @@ -2,6 +2,7 @@ package bklog import ( "context" + "runtime/debug" "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" @@ -61,3 +62,15 @@ func GetLogger(ctx context.Context) (l *logrus.Entry) { return l } + +// LazyStackTrace lets you include a stack trace as a field's value in a log but only +// call it when the log level is actually enabled. 
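The comment above describes a lazy log field: because the value is only rendered when an entry actually fires, the relatively expensive debug.Stack call is skipped entirely at disabled levels. An illustrative stand-alone check of that behavior with logrus; lazyStack and the captures counter are not buildkit code.

package main

import (
	"fmt"
	"runtime/debug"

	"github.com/sirupsen/logrus"
)

var captures int

// lazyStack mirrors the LazyStackTrace idea: the stack is only captured
// when the formatter asks for the field's string form.
type lazyStack struct{}

func (lazyStack) String() string {
	captures++
	return string(debug.Stack())
}

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	logrus.WithField("stack", lazyStack{}).Trace("suppressed entry")
	fmt.Println("captures after Trace:", captures) // 0: level disabled, String never ran

	logrus.WithField("stack", lazyStack{}).Info("emitted entry") // prints a full stack trace to stderr
	fmt.Println("captures after Info:", captures) // 1: formatter rendered the field
}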
+type LazyStackTrace struct{} + +func (LazyStackTrace) String() string { + return string(debug.Stack()) +} + +func (LazyStackTrace) MarshalText() ([]byte, error) { + return debug.Stack(), nil +} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go deleted file mode 100644 index e3486e8e4f..0000000000 --- a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go +++ /dev/null @@ -1,452 +0,0 @@ -// Package buildinfo implements utilities for build information. -// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -package buildinfo - -import ( - "context" - "encoding/base64" - "encoding/json" - "sort" - "strings" - - ctnref "github.com/containerd/containerd/reference" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/solver/llbsolver/provenance" - "github.com/moby/buildkit/source" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - "github.com/moby/buildkit/util/urlutil" - "github.com/pkg/errors" -) - -func FromProvenance(c *provenance.Capture) (*binfotypes.BuildInfo, error) { - var bi binfotypes.BuildInfo - - bi.Frontend = c.Frontend - bi.Attrs = map[string]*string{} - for k, v := range c.Args { - v := v - bi.Attrs[k] = &v - } - - for _, s := range c.Sources.Images { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: s.Ref, - Pin: s.Digest.String(), - }) - } - - for _, s := range c.Sources.HTTP { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeHTTP, - Ref: s.URL, - Pin: s.Digest.String(), - }) - } - - for _, s := range c.Sources.Git { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeGit, - Ref: s.URL, - Pin: s.Commit, - }) - } - - sort.Slice(bi.Sources, func(i, j int) bool { - return bi.Sources[i].Ref < bi.Sources[j].Ref - }) - - return &bi, nil -} - -func AddMetadata(metadata map[string][]byte, key string, c *provenance.Capture) error { - bi, err := FromProvenance(c) - if err != nil { - return err - } - dt, err := json.Marshal(bi) - if err != nil { - return err - } - metadata[key] = dt - return nil -} - -// Decode decodes a base64 encoded build info. -func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { - dec, err := base64.StdEncoding.DecodeString(enc) - if err != nil { - return bi, err - } - err = json.Unmarshal(dec, &bi) - return bi, err -} - -// Encode encodes build info. -func Encode(ctx context.Context, metadata map[string][]byte, key string, llbSources map[string]string) ([]byte, error) { - var bi binfotypes.BuildInfo - if metadata == nil { - metadata = make(map[string][]byte) - } - if v, ok := metadata[key]; ok && v != nil { - if err := json.Unmarshal(v, &bi); err != nil { - return nil, err - } - } - if sources, err := mergeSources(llbSources, bi.Sources); err == nil { - bi.Sources = sources - } else { - return nil, err - } - bi.Sources = dedupSources(bi.Sources, allDepsSources(bi.Deps, nil)) - return json.Marshal(bi) -} - -// mergeSources combines and fixes build sources from frontend sources. 
-func mergeSources(llbSources map[string]string, frontendSources []binfotypes.Source) ([]binfotypes.Source, error) { - if llbSources == nil { - llbSources = make(map[string]string) - } - // iterate and combine build sources - mbs := map[string]binfotypes.Source{} - for llbSource, pin := range llbSources { - src, err := source.FromString(llbSource) - if err != nil { - return nil, err - } - switch sourceID := src.(type) { - case *source.ImageIdentifier: - for i, fsrc := range frontendSources { - if fsrc.Type != binfotypes.SourceTypeDockerImage { - continue - } - // use original user input from frontend sources - if fsrc.Alias == sourceID.Reference.String() || fsrc.Pin == pin { - if fsrc.Alias == "" { - fsrc.Alias = sourceID.Reference.String() - } - parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) - } - mbs[fsrc.Alias] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: reference.TagNameOnly(parsed).String(), - Pin: pin, - } - frontendSources = append(frontendSources[:i], frontendSources[i+1:]...) - break - } - } - if _, ok := mbs[sourceID.Reference.String()]; !ok { - mbs[sourceID.Reference.String()] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: sourceID.Reference.String(), - Pin: pin, - } - } - case *source.GitIdentifier: - sref := sourceID.Remote - if len(sourceID.Ref) > 0 { - sref += "#" + sourceID.Ref - } - if len(sourceID.Subdir) > 0 { - sref += ":" + sourceID.Subdir - } - if _, ok := mbs[sref]; !ok { - mbs[sref] = binfotypes.Source{ - Type: binfotypes.SourceTypeGit, - Ref: urlutil.RedactCredentials(sref), - Pin: pin, - } - } - case *source.HTTPIdentifier: - if _, ok := mbs[sourceID.URL]; !ok { - mbs[sourceID.URL] = binfotypes.Source{ - Type: binfotypes.SourceTypeHTTP, - Ref: urlutil.RedactCredentials(sourceID.URL), - Pin: pin, - } - } - } - } - - // leftover sources in frontend. Mostly duplicated ones we don't need but - // there is an edge case if no instruction except sources one is defined - // (e.g. FROM ...) that can be valid so take it into account. - for _, fsrc := range frontendSources { - if fsrc.Type != binfotypes.SourceTypeDockerImage { - continue - } - if _, ok := mbs[fsrc.Alias]; !ok { - parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) - } - mbs[fsrc.Alias] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: reference.TagNameOnly(parsed).String(), - Pin: fsrc.Pin, - } - } - } - - srcs := make([]binfotypes.Source, 0, len(mbs)) - for _, bs := range mbs { - srcs = append(srcs, bs) - } - sort.Slice(srcs, func(i, j int) bool { - return srcs[i].Ref < srcs[j].Ref - }) - - return srcs, nil -} - -// decodeDeps decodes dependencies (buildinfo) added via the input context. 
-func decodeDeps(key string, attrs map[string]*string) (map[string]binfotypes.BuildInfo, error) { - var platform string - // extract platform from metadata key - if skey := strings.SplitN(key, "/", 2); len(skey) == 2 { - platform = skey[1] - } - - res := make(map[string]binfotypes.BuildInfo) - for k, v := range attrs { - // dependencies are only handled via the input context - if v == nil || !strings.HasPrefix(k, "input-metadata:") { - continue - } - - // if platform is defined in the key, only decode dependencies - // for that platform and vice versa - hasPlatform := len(strings.SplitN(k, "::", 2)) == 2 - if (platform != "" && !hasPlatform) || (platform == "" && hasPlatform) { - continue - } - - // decode input metadata - var inputresp map[string]string - if err := json.Unmarshal([]byte(*v), &inputresp); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal input-metadata") - } - - // check buildinfo key is present - if _, ok := inputresp[exptypes.ExporterBuildInfo]; !ok { - continue - } - - // decode buildinfo - bi, err := Decode(inputresp[exptypes.ExporterBuildInfo]) - if err != nil { - return nil, errors.Wrap(err, "failed to decode buildinfo from input-metadata") - } - - // set dep key - var depkey string - kl := strings.SplitN(k, ":", 2) - if len(kl) != 2 { - continue - } - depkey = strings.SplitN(kl[1], "::", 2)[0] - if platform != "" { - depkey = strings.TrimSuffix(depkey, "::"+platform) - } - - res[depkey] = bi - } - if len(res) == 0 { - return nil, nil - } - return res, nil -} - -// dedupSources deduplicates regular sources from dependencies ones. -func dedupSources(sources []binfotypes.Source, depsSources []binfotypes.Source) (srcs []binfotypes.Source) { - // dedup sources from deps - msrc := make(map[binfotypes.Source]struct{}) -sourcesloop: - for _, src := range sources { - for _, srcd := range depsSources { - if src == srcd { - continue sourcesloop - } - if src.Type == binfotypes.SourceTypeDockerImage && srcd.Type == binfotypes.SourceTypeDockerImage { - _, dgst := ctnref.SplitObject(src.Ref) - if dgst != "" && src.Pin == srcd.Pin { - continue sourcesloop - } - } - } - if _, ok := msrc[src]; !ok { - msrc[src] = struct{}{} - } - } - for src := range msrc { - srcs = append(srcs, src) - } - sort.Slice(srcs, func(i, j int) bool { - return srcs[i].Ref < srcs[j].Ref - }) - return srcs -} - -// allDepsSources gathers dependencies sources. -func allDepsSources(deps map[string]binfotypes.BuildInfo, visited map[binfotypes.Source]struct{}) (res []binfotypes.Source) { - if visited == nil { - visited = make(map[binfotypes.Source]struct{}) - } - if len(deps) == 0 { - return res - } - for _, dbi := range deps { - for _, dsrc := range dbi.Sources { - if _, ok := visited[dsrc]; ok { - continue - } - visited[dsrc] = struct{}{} - } - res = allDepsSources(dbi.Deps, visited) - } - for src := range visited { - res = append(res, src) - } - return res -} - -// FormatOpts holds build info format options. -type FormatOpts struct { - RemoveAttrs bool -} - -// Format formats build info. 
-func Format(dt []byte, opts FormatOpts) (_ []byte, err error) { - if len(dt) == 0 { - return dt, nil - } - - var bi binfotypes.BuildInfo - if err := json.Unmarshal(dt, &bi); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal buildinfo for formatting") - } - - if opts.RemoveAttrs { - bi.Attrs = nil - if len(bi.Deps) > 0 { - bi.Sources = dedupSources(append(bi.Sources, allDepsSources(bi.Deps, nil)...), nil) - bi.Deps = nil - } - } - - if dt, err = json.Marshal(bi); err != nil { - return nil, err - } - return dt, nil -} - -var knownAttrs = []string{ - //"cmdline", - "context", - "filename", - "source", - - //"add-hosts", - //"cgroup-parent", - //"force-network-mode", - //"hostname", - //"image-resolve-mode", - //"platform", - "shm-size", - "target", - "ulimit", -} - -// filterAttrs filters frontent opt by picking only those that -// could effectively change the build result. -func filterAttrs(key string, attrs map[string]*string) map[string]*string { - var platform string - // extract platform from metadata key - skey := strings.SplitN(key, "/", 2) - if len(skey) == 2 { - platform = skey[1] - } - filtered := make(map[string]*string) - for k, v := range attrs { - if v == nil { - continue - } - // control args are filtered out - if isControlArg(k) { - continue - } - // always include - if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "vcs:") { - filtered[k] = v - continue - } - // input context key and value has to be cleaned up - // before being included - if strings.HasPrefix(k, "context:") { - ctxkey := strings.SplitN(k, "::", 2) - hasCtxPlatform := len(ctxkey) == 2 - // if platform is set and also defined in key, set context - // for the right one. - if hasCtxPlatform && platform != "" && platform != ctxkey[1] { - continue - } - if platform == "" && hasCtxPlatform { - ctxval := strings.TrimSuffix(*v, "::"+ctxkey[1]) - filtered[strings.TrimSuffix(k, "::"+ctxkey[1])] = &ctxval - continue - } - ctxival := strings.TrimSuffix(*v, "::"+platform) - filtered[strings.TrimSuffix(k, "::"+platform)] = &ctxival - continue - } - // filter only for known attributes - for _, knownAttr := range knownAttrs { - if knownAttr == k { - filtered[k] = v - break - } - } - } - return filtered -} - -var knownControlArgs = []string{ - "BUILDKIT_CACHE_MOUNT_NS", - "BUILDKIT_CONTEXT_KEEP_GIT_DIR", - "BUILDKIT_BUILDINFO", - "BUILDKIT_INLINE_BUILDINFO_ATTRS", - "BUILDKIT_INLINE_CACHE", - "BUILDKIT_MULTI_PLATFORM", - "BUILDKIT_SANDBOX_HOSTNAME", - "BUILDKIT_SYNTAX", -} - -// isControlArg checks if a build attributes is a control arg -func isControlArg(attrKey string) bool { - for _, k := range knownControlArgs { - if strings.HasPrefix(attrKey, "build-arg:"+k) { - return true - } - } - return false -} - -func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { - if m1 == nil && m2 == nil { - return nil - } - if m1 == nil { - m1 = map[string]string{} - } - for k, v := range m2 { - if v != nil { - m1[k] = *v - } - } - return m1 -} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go b/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go deleted file mode 100644 index 06cf09681e..0000000000 --- a/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package binfotypes implements types for build information. 
-// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -package binfotypes - -import ( - srctypes "github.com/moby/buildkit/source/types" -) - -// ImageConfigField defines the key of build dependencies. -const ImageConfigField = "moby.buildkit.buildinfo.v1" - -// ImageConfig defines the structure of build dependencies -// inside image config. -type ImageConfig struct { - BuildInfo string `json:"moby.buildkit.buildinfo.v1,omitempty"` -} - -// BuildInfo defines the main structure added to image config as -// ImageConfigField key and returned in solver ExporterResponse as -// exptypes.ExporterBuildInfo key. -type BuildInfo struct { - // Frontend defines the frontend used to build. - Frontend string `json:"frontend,omitempty"` - // Attrs defines build request attributes. - Attrs map[string]*string `json:"attrs,omitempty"` - // Sources defines build dependencies. - Sources []Source `json:"sources,omitempty"` - // Deps defines context dependencies. - Deps map[string]BuildInfo `json:"deps,omitempty"` -} - -// Source defines a build dependency. -type Source struct { - // Type defines the SourceType source type (docker-image, git, http). - Type SourceType `json:"type,omitempty"` - // Ref is the reference of the source. - Ref string `json:"ref,omitempty"` - // Alias is a special field used to match with the actual source ref - // because frontend might have already transformed a string user typed - // before generating LLB. - Alias string `json:"alias,omitempty"` - // Pin is the source digest. - Pin string `json:"pin,omitempty"` -} - -// SourceType contains source type. -type SourceType string - -// List of source types. -const ( - SourceTypeDockerImage SourceType = srctypes.DockerImageScheme - SourceTypeGit SourceType = srctypes.GitScheme - SourceTypeHTTP SourceType = srctypes.HTTPScheme -) diff --git a/vendor/github.com/moby/buildkit/util/compression/attrs.go b/vendor/github.com/moby/buildkit/util/compression/attrs.go new file mode 100644 index 0000000000..1f986d3712 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/attrs.go @@ -0,0 +1,48 @@ +package compression + +import ( + "strconv" + + "github.com/pkg/errors" +) + +const ( + attrLayerCompression = "compression" + attrForceCompression = "force-compression" + attrCompressionLevel = "compression-level" +) + +func ParseAttributes(attrs map[string]string) (Config, error) { + var compressionType Type + if v, ok := attrs[attrLayerCompression]; ok { + c, err := Parse(v) + if err != nil { + return Config{}, err + } + compressionType = c + } else { + compressionType = Default + } + compressionConfig := New(compressionType) + if v, ok := attrs[attrForceCompression]; ok { + var force bool + if v == "" { + force = true + } else { + b, err := strconv.ParseBool(v) + if err != nil { + return Config{}, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) + } + force = b + } + compressionConfig = compressionConfig.SetForce(force) + } + if v, ok := attrs[attrCompressionLevel]; ok { + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return Config{}, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) + } + compressionConfig = compressionConfig.SetLevel(int(ii)) + } + return compressionConfig, nil +} diff --git a/vendor/github.com/moby/buildkit/util/compression/compression.go b/vendor/github.com/moby/buildkit/util/compression/compression.go index cfc26b9078..8398bfb299 100644 --- 
a/vendor/github.com/moby/buildkit/util/compression/compression.go +++ b/vendor/github.com/moby/buildkit/util/compression/compression.go @@ -9,11 +9,11 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) @@ -78,10 +78,9 @@ func (c Config) SetLevel(l int) Config { const ( mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd" - mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790 ) -var Default gzipType = Gzip +var Default = Gzip func parse(t string) (Type, error) { switch t { @@ -100,11 +99,11 @@ func parse(t string) (Type, error) { func fromMediaType(mediaType string) (Type, error) { switch toOCILayerType[mediaType] { - case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: + case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return Uncompressed, nil - case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: + case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return Gzip, nil - case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: + case ocispecs.MediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return Zstd, nil default: return nil, errors.Errorf("unsupported media type %s", mediaType) @@ -191,27 +190,27 @@ var toDockerLayerType = map[string]string{ images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerForeignGzip, - ocispecs.MediaTypeImageLayerNonDistributable: images.MediaTypeDockerSchema2LayerForeign, - ocispecs.MediaTypeImageLayerNonDistributableGzip: images.MediaTypeDockerSchema2LayerForeignGzip, - mediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd, + ocispecs.MediaTypeImageLayerNonDistributable: images.MediaTypeDockerSchema2LayerForeign, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerNonDistributableGzip: images.MediaTypeDockerSchema2LayerForeignGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
+ ocispecs.MediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd, mediaTypeDockerSchema2LayerZstd: mediaTypeDockerSchema2LayerZstd, } var toOCILayerType = map[string]string{ ocispecs.MediaTypeImageLayer: ocispecs.MediaTypeImageLayer, - ocispecs.MediaTypeImageLayerNonDistributable: ocispecs.MediaTypeImageLayerNonDistributable, - ocispecs.MediaTypeImageLayerNonDistributableGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, - ocispecs.MediaTypeImageLayerNonDistributableZstd: ocispecs.MediaTypeImageLayerNonDistributableZstd, + ocispecs.MediaTypeImageLayerNonDistributable: ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerNonDistributableGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerNonDistributableZstd: ocispecs.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. images.MediaTypeDockerSchema2Layer: ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip, - images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayerNonDistributable, - images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, - mediaTypeImageLayerZstd: mediaTypeImageLayerZstd, - mediaTypeDockerSchema2LayerZstd: mediaTypeImageLayerZstd, + images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
+ ocispecs.MediaTypeImageLayerZstd: ocispecs.MediaTypeImageLayerZstd, + mediaTypeDockerSchema2LayerZstd: ocispecs.MediaTypeImageLayerZstd, } -func convertLayerMediaType(mediaType string, oci bool) string { +func convertLayerMediaType(ctx context.Context, mediaType string, oci bool) string { var converted string if oci { converted = toOCILayerType[mediaType] @@ -219,16 +218,16 @@ func convertLayerMediaType(mediaType string, oci bool) string { converted = toDockerLayerType[mediaType] } if converted == "" { - logrus.Warnf("unhandled conversion for mediatype %q", mediaType) + bklog.G(ctx).Warnf("unhandled conversion for mediatype %q", mediaType) return mediaType } return converted } -func ConvertAllLayerMediaTypes(oci bool, descs ...ocispecs.Descriptor) []ocispecs.Descriptor { +func ConvertAllLayerMediaTypes(ctx context.Context, oci bool, descs ...ocispecs.Descriptor) []ocispecs.Descriptor { var converted []ocispecs.Descriptor for _, desc := range descs { - desc.MediaType = convertLayerMediaType(desc.MediaType, oci) + desc.MediaType = convertLayerMediaType(ctx, desc.MediaType, oci) converted = append(converted, desc) } return converted diff --git a/vendor/github.com/moby/buildkit/util/compression/zstd.go b/vendor/github.com/moby/buildkit/util/compression/zstd.go index f18872199f..e7de6a21c3 100644 --- a/vendor/github.com/moby/buildkit/util/compression/zstd.go +++ b/vendor/github.com/moby/buildkit/util/compression/zstd.go @@ -47,7 +47,7 @@ func (c zstdType) NeedsForceCompression() bool { } func (c zstdType) MediaType() string { - return mediaTypeImageLayerZstd + return ocispecs.MediaTypeImageLayerZstd } func (c zstdType) String() string { diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go index 5039bd0c20..22ef70c12f 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -15,6 +15,7 @@ import ( ) func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor, ref string, logger func([]byte)) error { + ctx = RegisterContentPayloadTypes(ctx) if _, err := retryhandler.New(limited.FetchHandler(ingester, &localFetcher{provider}, ref), logger)(ctx, desc); err != nil { return err } @@ -60,6 +61,7 @@ func (r *rc) Seek(offset int64, whence int) (int64, error) { } func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor) error { + ctx = RegisterContentPayloadTypes(ctx) var m sync.Mutex manifestStack := []ocispecs.Descriptor{} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go index 16fb9aafa5..d7d0b5bbe9 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -20,7 +20,6 @@ func ProviderFromRef(ref string) (ocispecs.Descriptor, content.Provider, error) headers := http.Header{} headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }) @@ -40,7 +39,6 @@ func IngesterFromRef(ref string) (content.Ingester, error) { headers := http.Header{} headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }) diff --git a/vendor/github.com/moby/buildkit/util/contentutil/types.go 
b/vendor/github.com/moby/buildkit/util/contentutil/types.go new file mode 100644 index 0000000000..19dfb65408 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/types.go @@ -0,0 +1,15 @@ +package contentutil + +import ( + "context" + + "github.com/containerd/containerd/remotes" + intoto "github.com/in-toto/in-toto-golang/in_toto" +) + +// RegisterContentPayloadTypes registers content types that are not defined by +// default but that we expect to find in registry images. +func RegisterContentPayloadTypes(ctx context.Context) context.Context { + ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") + return ctx +} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go index c53a24b865..9ab9398013 100644 --- a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go +++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -10,16 +10,16 @@ import ( "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/cap" "github.com/containerd/containerd/pkg/userns" + "github.com/moby/buildkit/util/bklog" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) // WithInsecureSpec sets spec with All capability. func WithInsecureSpec() oci.SpecOpts { - return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { - addCaps, err := getAllCaps() + return func(ctx context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + addCaps, err := getAllCaps(ctx) if err != nil { return err } @@ -96,7 +96,7 @@ func WithInsecureSpec() oci.SpecOpts { loopID, err := getFreeLoopID() if err != nil { - logrus.Debugf("failed to get next free loop device: %v", err) + bklog.G(ctx).Debugf("failed to get next free loop device: %v", err) } for i := 0; i <= loopID+7; i++ { @@ -142,7 +142,7 @@ func getCurrentCaps() ([]string, error) { return currentCaps, currentCapsError } -func getAllCaps() ([]string, error) { +func getAllCaps(ctx context.Context) ([]string, error) { availableCaps, err := getCurrentCaps() if err != nil { return nil, errors.Errorf("error getting current capabilities: %s", err) @@ -152,7 +152,7 @@ func getAllCaps() ([]string, error) { // they are either not supported by the kernel or dropped at the process level for _, cap := range availableCaps { if _, exists := linux35Caps[cap]; !exists { - logrus.Warnf("capability %s could not be granted for insecure mode", cap) + bklog.G(ctx).Warnf("capability %s could not be granted for insecure mode", cap) } } diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go index 3c1b673e15..82ed25205f 100644 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go @@ -25,13 +25,13 @@ type contextKeyT string var contextKey = contextKeyT("buildkit/util/flightcontrol.progress") // Group is a flightcontrol synchronization group -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized +type Group[T any] struct { + mu sync.Mutex // protects m + m map[string]*call[T] // lazily initialized } // Do executes a context function syncronized by the key -func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) 
(interface{}, error)) (v interface{}, err error) { +func (g *Group[T]) Do(ctx context.Context, key string, fn func(ctx context.Context) (T, error)) (v T, err error) { var backoff time.Duration for { v, err = g.do(ctx, key, fn) @@ -53,10 +53,10 @@ func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) } } -func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) { +func (g *Group[T]) do(ctx context.Context, key string, fn func(ctx context.Context) (T, error)) (T, error) { g.mu.Lock() if g.m == nil { - g.m = make(map[string]*call) + g.m = make(map[string]*call[T]) } if c, ok := g.m[key]; ok { // register 2nd waiter @@ -78,16 +78,16 @@ func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) return c.wait(ctx) } -type call struct { +type call[T any] struct { mu sync.Mutex - result interface{} + result T err error ready chan struct{} cleaned chan struct{} - ctx *sharedContext + ctx *sharedContext[T] ctxs []context.Context - fn func(ctx context.Context) (interface{}, error) + fn func(ctx context.Context) (T, error) once sync.Once closeProgressWriter func() @@ -95,8 +95,8 @@ type call struct { progressCtx context.Context } -func newCall(fn func(ctx context.Context) (interface{}, error)) *call { - c := &call{ +func newCall[T any](fn func(ctx context.Context) (T, error)) *call[T] { + c := &call[T]{ fn: fn, ready: make(chan struct{}), cleaned: make(chan struct{}), @@ -114,7 +114,7 @@ func newCall(fn func(ctx context.Context) (interface{}, error)) *call { return c } -func (c *call) run() { +func (c *call[T]) run() { defer c.closeProgressWriter() ctx, cancel := context.WithCancel(c.ctx) defer cancel() @@ -126,7 +126,8 @@ func (c *call) run() { close(c.ready) } -func (c *call) wait(ctx context.Context) (v interface{}, err error) { +func (c *call[T]) wait(ctx context.Context) (v T, err error) { + var empty T c.mu.Lock() // detect case where caller has just returned, let it clean up before select { @@ -134,7 +135,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { c.mu.Unlock() if c.err != nil { // on error retry <-c.cleaned - return nil, errRetry + return empty, errRetry } pw, ok, _ := progress.NewFromContext(ctx) if ok { @@ -145,7 +146,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { case <-c.ctx.done: // could return if no error c.mu.Unlock() <-c.cleaned - return nil, errRetry + return empty, errRetry default: } @@ -174,13 +175,13 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { if ok { c.progressState.close(pw) } - return nil, ctx.Err() + return empty, ctx.Err() case <-c.ready: return c.result, c.err // shared not implemented yet } } -func (c *call) Deadline() (deadline time.Time, ok bool) { +func (c *call[T]) Deadline() (deadline time.Time, ok bool) { c.mu.Lock() defer c.mu.Unlock() for _, ctx := range c.ctxs { @@ -196,11 +197,11 @@ func (c *call) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } -func (c *call) Done() <-chan struct{} { +func (c *call[T]) Done() <-chan struct{} { return c.ctx.done } -func (c *call) Err() error { +func (c *call[T]) Err() error { select { case <-c.ctx.Done(): return c.ctx.err @@ -209,7 +210,7 @@ func (c *call) Err() error { } } -func (c *call) Value(key interface{}) interface{} { +func (c *call[T]) Value(key interface{}) interface{} { if key == contextKey { return c.progressState } @@ -239,17 +240,17 @@ func (c *call) Value(key interface{}) interface{} { 
return nil } -type sharedContext struct { - *call +type sharedContext[T any] struct { + *call[T] done chan struct{} err error } -func newContext(c *call) *sharedContext { - return &sharedContext{call: c, done: make(chan struct{})} +func newContext[T any](c *call[T]) *sharedContext[T] { + return &sharedContext[T]{call: c, done: make(chan struct{})} } -func (sc *sharedContext) checkDone() bool { +func (sc *sharedContext[T]) checkDone() bool { sc.mu.Lock() select { case <-sc.done: diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index 6cd9fae98e..710bc1ec8b 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -1,16 +1,17 @@ package grpcerrors import ( + "context" "encoding/json" "errors" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" rpc "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/golang/protobuf/ptypes/any" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/stack" - "github.com/sirupsen/logrus" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -25,7 +26,7 @@ type TypedErrorProto interface { WrapError(error) error } -func ToGRPC(err error) error { +func ToGRPC(ctx context.Context, err error) error { if err == nil { return nil } @@ -64,7 +65,7 @@ func ToGRPC(err error) error { }) if len(details) > 0 { - if st2, err := withDetails(st, details...); err == nil { + if st2, err := withDetails(ctx, st, details...); err == nil { st = st2 } } @@ -72,7 +73,7 @@ func ToGRPC(err error) error { return st.Err() } -func withDetails(s *status.Status, details ...proto.Message) (*status.Status, error) { +func withDetails(ctx context.Context, s *status.Status, details ...proto.Message) (*status.Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } @@ -80,7 +81,7 @@ func withDetails(s *status.Status, details ...proto.Message) (*status.Status, er for _, detail := range details { url, err := typeurl.TypeURL(detail) if err != nil { - logrus.Warnf("ignoring typed error %T: not registered", detail) + bklog.G(ctx).Warnf("ignoring typed error %T: not registered", detail) continue } dt, err := json.Marshal(detail) diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go index 1c17e4c67d..a592078910 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go @@ -15,7 +15,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una oldErr := err if err != nil { stack.Helper() - err = ToGRPC(err) + err = ToGRPC(ctx, err) } if oldErr != nil && err == nil { logErr := errors.Wrap(err, "invalid grpc error conversion") @@ -30,7 +30,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una } func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - err := ToGRPC(handler(srv, ss)) + err := ToGRPC(ss.Context(), handler(srv, ss)) if err != nil { stack.Helper() } @@ -50,5 +50,5 @@ func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grp if err != nil { stack.Helper() } 
- return s, ToGRPC(err) + return s, ToGRPC(ctx, err) } diff --git a/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go b/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go deleted file mode 100644 index 7196453c33..0000000000 --- a/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -package imageutil - -import ( - "encoding/base64" - "encoding/json" - - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - "github.com/pkg/errors" -) - -// BuildInfo returns build info from image config. -// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -func BuildInfo(dt []byte) (*binfotypes.BuildInfo, error) { - if len(dt) == 0 { - return nil, nil - } - var config binfotypes.ImageConfig - if err := json.Unmarshal(dt, &config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal image config") - } - if len(config.BuildInfo) == 0 { - return nil, nil - } - dtbi, err := base64.StdEncoding.DecodeString(config.BuildInfo) - if err != nil { - return nil, err - } - var bi binfotypes.BuildInfo - if err = json.Unmarshal(dtbi, &bi); err != nil { - return nil, errors.Wrap(err, "failed to decode buildinfo from image config") - } - return &bi, nil -} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go index 76e0a5da35..f183db5872 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -3,6 +3,8 @@ package imageutil import ( "context" "encoding/json" + "fmt" + "strings" "sync" "time" @@ -13,7 +15,11 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/util/attestation" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/solver/pb" + srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/sourcepolicy" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/resolver/limited" @@ -47,7 +53,17 @@ func AddLease(f func(context.Context) error) { leasesMu.Unlock() } -func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform) (digest.Digest, []byte, error) { +// ResolveToNonImageError is returned by the resolver when the ref is mutated by policy to a non-image ref +type ResolveToNonImageError struct { + Ref string + Updated string +} + +func (e ResolveToNonImageError) Error() string { + return fmt.Sprintf("ref mutated by policy to non-image: %s://%s -> %s", srctypes.DockerImageScheme, e.Ref, e.Updated) +} + +func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform, spls []*spb.Policy) (string, digest.Digest, []byte, error) { // TODO: fix buildkit to take interface instead of struct var platform platforms.MatchComparer if p != nil { @@ -57,13 +73,44 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co } ref, err := reference.Parse(str) if err != nil { - return "", nil, errors.WithStack(err) + return "", "", nil, errors.WithStack(err) + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: srctypes.DockerImageScheme + "://" + 
ref.String(), + }, + }, + } + + mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op) + if err != nil { + return "", "", nil, errors.Wrap(err, "could not resolve image due to policy") + } + + if mut { + var ( + t string + ok bool + ) + t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://") + if !ok { + return "", "", nil, errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier()) + } + if ok && t != srctypes.DockerImageScheme { + return "", "", nil, &ResolveToNonImageError{Ref: str, Updated: newRef} + } + ref, err = reference.Parse(newRef) + if err != nil { + return "", "", nil, errors.WithStack(err) + } } if leaseManager != nil { ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { - return "", nil, errors.WithStack(err) + return "", "", nil, errors.WithStack(err) } ctx = ctx2 defer func() { @@ -94,24 +141,25 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co if desc.MediaType == "" { _, desc, err = resolver.Resolve(ctx, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } } fetcher, err := resolver.Fetcher(ctx, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } if desc.MediaType == images.MediaTypeDockerSchema1Manifest { - return readSchema1Config(ctx, ref.String(), desc, fetcher, cache) + dgst, dt, err := readSchema1Config(ctx, ref.String(), desc, fetcher, cache) + return ref.String(), dgst, dt, err } children := childrenConfigHandler(cache, platform) dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } handlers := []images.Handler{ @@ -120,19 +168,19 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co children, } if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { - return "", nil, err + return "", "", nil, err } config, err := images.Config(ctx, cache, desc, platform) if err != nil { - return "", nil, err + return "", "", nil, err } dt, err := content.ReadBlob(ctx, cache, config) if err != nil { - return "", nil, err + return "", "", nil, err } - return desc.Digest, dt, nil + return ref.String(), desc.Digest, dt, nil } func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc { @@ -174,7 +222,7 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo descs = append(descs, index.Manifests...) } case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType, - attestation.MediaTypeDockerSchema2AttestationType: + intoto.PayloadType: // childless data types. 
return nil, nil default: diff --git a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go index 45a35273a5..a02fb9613c 100644 --- a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go +++ b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go @@ -35,41 +35,49 @@ func MakeTemporary(l *leases.Lease) error { return nil } -func WithNamespace(lm leases.Manager, ns string) leases.Manager { - return &nsLM{manager: lm, ns: ns} +func WithNamespace(lm leases.Manager, ns string) *Manager { + return &Manager{manager: lm, ns: ns} } -type nsLM struct { +type Manager struct { manager leases.Manager ns string } -func (l *nsLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { +func (l *Manager) Namespace() string { + return l.ns +} + +func (l *Manager) WithNamespace(ns string) *Manager { + return WithNamespace(l.manager, ns) +} + +func (l *Manager) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.Create(ctx, opts...) } -func (l *nsLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { +func (l *Manager) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.Delete(ctx, lease, opts...) } -func (l *nsLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { +func (l *Manager) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.List(ctx, filters...) } -func (l *nsLM) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { +func (l *Manager) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.AddResource(ctx, lease, resource) } -func (l *nsLM) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { +func (l *Manager) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.DeleteResource(ctx, lease, resource) } -func (l *nsLM) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { +func (l *Manager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.ListResources(ctx, lease) } diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s b/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go index 2bebfd638e..2d37aa94a1 100644 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go @@ -242,28 +242,56 @@ func (c *cniProvider) newNS(ctx context.Context, hostname string) (*cniNS, error cni.WithArgs("IgnoreUnknown", "1")) } - if _, err := c.CNI.Setup(context.TODO(), id, nativeID, nsOpts...); err != nil { + cniRes, err := c.CNI.Setup(context.TODO(), id, nativeID, nsOpts...) 
+ if err != nil { deleteNetNS(nativeID) return nil, errors.Wrap(err, "CNI setup error") } trace.SpanFromContext(ctx).AddEvent("finished setting up network namespace") bklog.G(ctx).Debugf("finished setting up network namespace %s", id) - return &cniNS{ + vethName := "" + for k := range cniRes.Interfaces { + if strings.HasPrefix(k, "veth") { + if vethName != "" { + // invalid config + vethName = "" + break + } + vethName = k + } + } + + ns := &cniNS{ nativeID: nativeID, id: id, handle: c.CNI, opts: nsOpts, - }, nil + vethName: vethName, + } + + if ns.vethName != "" { + sample, err := ns.sample() + if err == nil && sample != nil { + ns.canSample = true + ns.offsetSample = sample + } + } + + return ns, nil } type cniNS struct { - pool *cniPool - handle cni.CNI - id string - nativeID string - opts []cni.NamespaceOpts - lastUsed time.Time + pool *cniPool + handle cni.CNI + id string + nativeID string + opts []cni.NamespaceOpts + lastUsed time.Time + vethName string + canSample bool + offsetSample *network.Sample + prevSample *network.Sample } func (ns *cniNS) Set(s *specs.Spec) error { @@ -271,6 +299,9 @@ func (ns *cniNS) Set(s *specs.Spec) error { } func (ns *cniNS) Close() error { + if ns.prevSample != nil { + ns.offsetSample = ns.prevSample + } if ns.pool == nil { return ns.release() } @@ -278,6 +309,30 @@ func (ns *cniNS) Close() error { return nil } +func (ns *cniNS) Sample() (*network.Sample, error) { + if !ns.canSample { + return nil, nil + } + s, err := ns.sample() + if err != nil { + return nil, err + } + if s == nil { + return nil, nil + } + if ns.offsetSample != nil { + s.TxBytes -= ns.offsetSample.TxBytes + s.RxBytes -= ns.offsetSample.RxBytes + s.TxPackets -= ns.offsetSample.TxPackets + s.RxPackets -= ns.offsetSample.RxPackets + s.TxErrors -= ns.offsetSample.TxErrors + s.RxErrors -= ns.offsetSample.RxErrors + s.TxDropped -= ns.offsetSample.TxDropped + s.RxDropped -= ns.offsetSample.RxDropped + } + return s, nil +} + func (ns *cniNS) release() error { bklog.L.Debugf("releasing cni network namespace %s", ns.id) err := ns.handle.Remove(context.TODO(), ns.id, ns.nativeID, ns.opts...) 
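The cni.go hunks above add per-namespace network accounting: setup records the single veth* interface reported by CNI, takes a baseline reading, and later Sample() calls subtract that baseline; Close() promotes the last reading to the new offset, so a namespace returned to the pool starts its next user from a fresh baseline. The cni_linux.go file added next supplies the raw counters from /sys/class/net/<veth>/statistics. A minimal standalone sketch of the offset bookkeeping, using hypothetical names (counters, netSampler) rather than buildkit's types:

package main

import "fmt"

type counters struct {
	RxBytes, TxBytes int64
}

type netSampler struct {
	read   func() counters // e.g. parses /sys/class/net/<veth>/statistics
	offset counters        // baseline captured when the namespace is set up
}

func newNetSampler(read func() counters) *netSampler {
	return &netSampler{read: read, offset: read()}
}

// Sample reports counters accumulated since the sampler was created, by
// subtracting the baseline from the current reading.
func (s *netSampler) Sample() counters {
	cur := s.read()
	return counters{
		RxBytes: cur.RxBytes - s.offset.RxBytes,
		TxBytes: cur.TxBytes - s.offset.TxBytes,
	}
}

func main() {
	var raw counters
	s := newNetSampler(func() counters { return raw })
	raw = counters{RxBytes: 4096, TxBytes: 1024} // traffic during the build
	fmt.Printf("%+v\n", s.Sample())              // {RxBytes:4096 TxBytes:1024}
}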
diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go new file mode 100644 index 0000000000..8c4ac437e1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go @@ -0,0 +1,70 @@ +package cniprovider + +import ( + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/moby/buildkit/util/network" + "github.com/pkg/errors" +) + +func (ns *cniNS) sample() (*network.Sample, error) { + dirfd, err := syscall.Open(filepath.Join("/sys/class/net", ns.vethName, "statistics"), syscall.O_RDONLY, 0) + if err != nil { + if errors.Is(err, syscall.ENOENT) || errors.Is(err, syscall.ENOTDIR) { + return nil, nil + } + return nil, err + } + defer syscall.Close(dirfd) + + buf := make([]byte, 32) + stat := &network.Sample{} + + for _, name := range []string{"tx_bytes", "rx_bytes", "tx_packets", "rx_packets", "tx_errors", "rx_errors", "tx_dropped", "rx_dropped"} { + n, err := readFileAt(dirfd, name, buf) + if err != nil { + return nil, errors.Wrapf(err, "failed to read %s", name) + } + switch name { + case "tx_bytes": + stat.TxBytes = n + case "rx_bytes": + stat.RxBytes = n + case "tx_packets": + stat.TxPackets = n + case "rx_packets": + stat.RxPackets = n + case "tx_errors": + stat.TxErrors = n + case "rx_errors": + stat.RxErrors = n + case "tx_dropped": + stat.TxDropped = n + case "rx_dropped": + stat.RxDropped = n + } + } + ns.prevSample = stat + return stat, nil +} + +func readFileAt(dirfd int, filename string, buf []byte) (int64, error) { + fd, err := syscall.Openat(dirfd, filename, syscall.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer syscall.Close(fd) + + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return 0, err + } + nn, err := strconv.ParseInt(strings.TrimSpace(string(buf[:n])), 10, 64) + if err != nil { + return 0, err + } + return nn, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go new file mode 100644 index 0000000000..383798b962 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package cniprovider + +import ( + "github.com/moby/buildkit/util/network" +) + +func (ns *cniNS) sample() (*network.Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go deleted file mode 100644 index eb6dcacefc..0000000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build linux -// +build linux - -package cniprovider - -import ( - _ "unsafe" // required for go:linkname. 
-) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go index a05bc0a441..e8a3d5054f 100644 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go @@ -4,10 +4,11 @@ package cniprovider import ( + "fmt" "os" "path/filepath" + "runtime" "syscall" - "unsafe" "github.com/containerd/containerd/oci" "github.com/moby/buildkit/util/bklog" @@ -38,7 +39,16 @@ func cleanOldNamespaces(c *cniProvider) { }() } -func createNetNS(c *cniProvider, id string) (string, error) { +// unshareAndMount needs to be called in a separate thread +func unshareAndMountNetNS(target string) error { + if err := syscall.Unshare(syscall.CLONE_NEWNET); err != nil { + return err + } + + return syscall.Mount(fmt.Sprintf("/proc/self/task/%d/ns/net", syscall.Gettid()), target, "", syscall.MS_BIND, "") +} + +func createNetNS(c *cniProvider, id string) (_ string, err error) { nsPath := filepath.Join(c.root, "net/cni", id) if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { return "", err @@ -46,55 +56,34 @@ func createNetNS(c *cniProvider, id string) (string, error) { f, err := os.Create(nsPath) if err != nil { - deleteNetNS(nsPath) return "", err } - if err := f.Close(); err != nil { - deleteNetNS(nsPath) - return "", err - } - procNetNSBytes, err := syscall.BytePtrFromString("/proc/self/ns/net") - if err != nil { - deleteNetNS(nsPath) - return "", err - } - nsPathBytes, err := syscall.BytePtrFromString(nsPath) - if err != nil { - deleteNetNS(nsPath) - return "", err - } - beforeFork() - - pid, _, errno := syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|unix.CLONE_NEWNET, 0, 0, 0, 0, 0) - if errno != 0 { - afterFork() - deleteNetNS(nsPath) - return "", errno - } - - if pid != 0 { - afterFork() - var ws unix.WaitStatus - _, err = unix.Wait4(int(pid), &ws, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), &ws, 0, nil) - } - + defer func() { if err != nil { deleteNetNS(nsPath) - return "", errors.Wrapf(err, "failed to find pid=%d process", pid) } - errno = syscall.Errno(ws.ExitStatus()) - if errno != 0 { - deleteNetNS(nsPath) - return "", errors.Wrapf(errno, "failed to mount %s (pid=%d)", nsPath, pid) - } - return nsPath, nil + }() + if err := f.Close(); err != nil { + return "", err } - afterForkInChild() - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, uintptr(unsafe.Pointer(procNetNSBytes)), uintptr(unsafe.Pointer(nsPathBytes)), 0, uintptr(unix.MS_BIND), 0, 0) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") + + errCh := make(chan error) + + go func() { + defer close(errCh) + runtime.LockOSThread() + + if err := unshareAndMountNetNS(nsPath); err != nil { + errCh <- err + } + + // we leave the thread locked so go runtime terminates the thread + }() + + if err := <-errCh; err != nil { + return "", err + } + return nsPath, nil } func setNetNS(s *specs.Spec, nsPath string) error { diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go index fbd6747d00..d1725dd22a 100644 --- a/vendor/github.com/moby/buildkit/util/network/host.go +++ 
b/vendor/github.com/moby/buildkit/util/network/host.go @@ -35,3 +35,7 @@ func (h *hostNS) Set(s *specs.Spec) error { func (h *hostNS) Close() error { return nil } + +func (h *hostNS) Sample() (*Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go index b8d733ec32..d521739322 100644 --- a/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go @@ -4,8 +4,8 @@ package netproviders import ( + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/network" - "github.com/sirupsen/logrus" ) func getHostProvider() (network.Provider, bool) { @@ -13,6 +13,6 @@ func getHostProvider() (network.Provider, bool) { } func getFallback() (network.Provider, string) { - logrus.Warn("using host network as the default") + bklog.L.Warn("using host network as the default") return network.NewHostProvider(), "host" } diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go index c7e460e333..0a17a36db3 100644 --- a/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go @@ -4,8 +4,8 @@ package netproviders import ( + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/network" - "github.com/sirupsen/logrus" ) func getHostProvider() (network.Provider, bool) { @@ -13,6 +13,6 @@ func getHostProvider() (network.Provider, bool) { } func getFallback() (network.Provider, string) { - logrus.Warn("using null network as the default") + bklog.L.Warn("using null network as the default") return network.NewNoneProvider(), "" } diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go index c48f1984f0..4ff1bb81c3 100644 --- a/vendor/github.com/moby/buildkit/util/network/network.go +++ b/vendor/github.com/moby/buildkit/util/network/network.go @@ -7,6 +7,17 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" ) +type Sample struct { + RxBytes int64 `json:"rxBytes,omitempty"` + RxPackets int64 `json:"rxPackets,omitempty"` + RxErrors int64 `json:"rxErrors,omitempty"` + RxDropped int64 `json:"rxDropped,omitempty"` + TxBytes int64 `json:"txBytes,omitempty"` + TxPackets int64 `json:"txPackets,omitempty"` + TxErrors int64 `json:"txErrors,omitempty"` + TxDropped int64 `json:"txDropped,omitempty"` +} + // Provider interface for Network type Provider interface { io.Closer @@ -18,4 +29,6 @@ type Namespace interface { io.Closer // Set the namespace on the spec Set(*specs.Spec) error + + Sample() (*Sample, error) } diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go index e2b9d122d6..954229b059 100644 --- a/vendor/github.com/moby/buildkit/util/network/none.go +++ b/vendor/github.com/moby/buildkit/util/network/none.go @@ -31,3 +31,7 @@ func (h *noneNS) Set(s *specs.Spec) error { func (h *noneNS) Close() error { return nil } + +func (h *noneNS) Sample() (*Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay.go b/vendor/github.com/moby/buildkit/util/overlay/overlay.go new file mode 100644 index 0000000000..b472034c74 --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/util/overlay/overlay.go @@ -0,0 +1,8 @@ +package overlay + +import "github.com/containerd/containerd/mount" + +// IsOverlayMountType returns true if the mount type is overlay-based +func IsOverlayMountType(mnt mount.Mount) bool { + return mnt.Type == "overlay" +} diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go index f2f69bba06..62179f9ce8 100644 --- a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go +++ b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go @@ -13,6 +13,7 @@ import ( "strings" "sync" "syscall" + "time" "github.com/containerd/containerd/archive" "github.com/containerd/containerd/mount" @@ -38,24 +39,23 @@ func GetUpperdir(lower, upper []mount.Mount) (string, error) { // Get layer directories of lower snapshot var lowerlayers []string lowerM := lower[0] - switch lowerM.Type { - case "bind": + if lowerM.Type == "bind" { // lower snapshot is a bind mount of one layer lowerlayers = []string{lowerM.Source} - case "overlay": + } else if IsOverlayMountType(lowerM) { // lower snapshot is an overlay mount of multiple layers var err error lowerlayers, err = GetOverlayLayers(lowerM) if err != nil { return "", err } - default: + } else { return "", errors.Errorf("cannot get layer information from mount option (type = %q)", lowerM.Type) } // Get layer directories of upper snapshot upperM := upper[0] - if upperM.Type != "overlay" { + if !IsOverlayMountType(upperM) { return "", errors.Errorf("upper snapshot isn't overlay mounted (type = %q)", upperM.Type) } upperlayers, err := GetOverlayLayers(upperM) @@ -127,7 +127,8 @@ func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mo } return mount.WithTempMount(ctx, lower, func(lowerRoot string) error { return mount.WithTempMount(ctx, upperView, func(upperViewRoot string) error { - cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot) + // WithWhiteoutTime(0) will no longer need to be specified when https://github.com/containerd/containerd/pull/8764 gets merged + cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot, archive.WithWhiteoutTime(time.Unix(0, 0).UTC())) if err := Changes(ctx, cw.HandleChange, upperdir, upperViewRoot, lowerRoot); err != nil { if err2 := cw.Close(); err2 != nil { return errors.Wrapf(err, "failed to record upperdir changes (close error: %v)", err2) diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go index a856db8caa..7cce8a7ca7 100644 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -65,7 +65,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error { Sys: v, meta: ps.meta, } - return ps.WriteRawProgress(p) + return ps.writeRawProgress(p) } func (ps *MultiWriter) WriteRawProgress(p *Progress) error { diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go b/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go index f6d3174769..9758f6b5d6 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" + "github.com/moby/buildkit/util/bklog" "github.com/morikuni/aec" - "github.com/sirupsen/logrus" ) var termColorMap = map[string]aec.ANSI{ @@ -41,7 +41,7 @@ func 
setUserDefinedTermColors(colorsEnv string) { k, v, ok := strings.Cut(field, "=") if !ok || strings.Contains(v, "=") { err := errors.New("A valid entry must have exactly two fields") - logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) + bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) continue } k = strings.ToLower(k) @@ -53,7 +53,7 @@ func setUserDefinedTermColors(colorsEnv string) { } } else { err := errors.New("Colors must be a name from the pre-defined list or a valid 3-part RGB value") - logrus.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) + bklog.L.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) } } } @@ -63,7 +63,7 @@ func readBuildkitColorsEnv(colorsEnv string) []string { csvReader.Comma = ':' fields, err := csvReader.Read() if err != nil { - logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. Falling back to defaults.") + bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. Falling back to defaults.") return nil } return fields @@ -73,12 +73,12 @@ func readRGB(v string) aec.ANSI { csvReader := csv.NewReader(strings.NewReader(v)) fields, err := csvReader.Read() if err != nil { - logrus.WithError(err).Warnf("Could not parse value %s as valid comma-separated RGB color. Ignoring.", v) + bklog.L.WithError(err).Warnf("Could not parse value %s as valid comma-separated RGB color. Ignoring.", v) return nil } if len(fields) != 3 { err = errors.New("A valid RGB color must have three fields") - logrus.WithError(err).Warnf("Could not parse value %s as valid RGB color. Ignoring.", v) + bklog.L.WithError(err).Warnf("Could not parse value %s as valid RGB color. Ignoring.", v) return nil } ok := isValidRGB(fields) @@ -103,7 +103,7 @@ func parseKeys(k string, c aec.ANSI) { case "warning": colorWarning = c default: - logrus.Warnf("Unknown key found in BUILDKIT_COLORS (expected: run, cancel, error, or warning): %s", k) + bklog.L.Warnf("Unknown key found in BUILDKIT_COLORS (expected: run, cancel, error, or warning): %s", k) } } @@ -111,14 +111,14 @@ func isValidRGB(s []string) bool { for _, n := range s { num, err := strconv.Atoi(n) if err != nil { - logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not an integer: %s", strings.Join(s, ",")) + bklog.L.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not an integer: %s", strings.Join(s, ",")) return false } ok := isValidRGBValue(num) if ok { continue } else { - logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not within the valid range of 0-255: %s", strings.Join(s, ",")) + bklog.L.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not within the valid range of 0-255: %s", strings.Join(s, ",")) return false } } diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go index edbdaaa75e..4ceb4f5264 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go @@ -21,11 +21,37 @@ import ( "golang.org/x/time/rate" ) -func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w io.Writer, ch chan *client.SolveStatus) ([]client.VertexWarning, error) { +type displaySolveStatusOpts struct { + phase string + textDesc string + consoleDesc string +} + +type DisplaySolveStatusOpt func(b 
*displaySolveStatusOpts) + +func WithPhase(phase string) DisplaySolveStatusOpt { + return func(b *displaySolveStatusOpts) { + b.phase = phase + } +} + +func WithDesc(text string, console string) DisplaySolveStatusOpt { + return func(b *displaySolveStatusOpts) { + b.textDesc = text + b.consoleDesc = console + } +} + +func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch chan *client.SolveStatus, opts ...DisplaySolveStatusOpt) ([]client.VertexWarning, error) { modeConsole := c != nil - disp := &display{c: c, phase: phase} - printer := &textMux{w: w} + dsso := &displaySolveStatusOpts{} + for _, opt := range opts { + opt(dsso) + } + + disp := &display{c: c, phase: dsso.phase, desc: dsso.consoleDesc} + printer := &textMux{w: w, desc: dsso.textDesc} if disp.phase == "" { disp.phase = "Building" @@ -556,7 +582,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { } else if sec < 100 { prec = 2 } - v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) + v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) } i++ }) @@ -711,6 +737,7 @@ func addTime(tm *time.Time, d time.Duration) *time.Time { type display struct { c console.Console phase string + desc string lineCount int repeated bool } @@ -784,7 +811,11 @@ func (disp *display) print(d displayInfo, width, height int, all bool) { defer fmt.Fprint(disp.c, aec.Show) out := fmt.Sprintf("[+] %s %.1fs (%d/%d) %s", disp.phase, time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr) - out = align(out, "", width) + if disp.desc != "" { + out = align(out, disp.desc, width-1) + } else { + out = align(out, "", width) + } fmt.Fprintln(disp.c, out) lineCount := 0 for _, j := range d.jobs { diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go index cc8e45be29..338079d474 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go @@ -32,6 +32,7 @@ type textMux struct { last map[string]lastStatus notFirst bool nextIndex int + desc string } func (p *textMux) printVtx(t *trace, dgst digest.Digest) { @@ -63,6 +64,9 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { if p.notFirst { fmt.Fprintln(p.w, "") } else { + if p.desc != "" { + fmt.Fprintf(p.w, "#0 %s\n\n", p.desc) + } p.notFirst = true } @@ -139,10 +143,13 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { } for i, l := range v.logs { - if i == 0 { + if i == 0 && v.logsOffset != 0 { // index has already been printed l = l[v.logsOffset:] + fmt.Fprintf(p.w, "%s", l) + } else { + fmt.Fprintf(p.w, "#%d %s", v.index, []byte(l)) } - fmt.Fprintf(p.w, "%s", []byte(l)) + if i != len(v.logs)-1 || !v.logsPartial { fmt.Fprintln(p.w, "") } diff --git a/vendor/github.com/moby/buildkit/util/pull/pull.go b/vendor/github.com/moby/buildkit/util/pull/pull.go index c66c4e784a..9527953c48 100644 --- a/vendor/github.com/moby/buildkit/util/pull/pull.go +++ b/vendor/github.com/moby/buildkit/util/pull/pull.go @@ -10,7 +10,7 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // SA1019 deprecated "github.com/moby/buildkit/session" 
"github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/flightcontrol" @@ -32,7 +32,7 @@ type Puller struct { Src reference.Spec Platform ocispecs.Platform - g flightcontrol.Group + g flightcontrol.Group[struct{}] resolveErr error resolveDone bool desc ocispecs.Descriptor @@ -54,9 +54,9 @@ type PulledManifests struct { } func (p *Puller) resolve(ctx context.Context, resolver remotes.Resolver) error { - _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { if p.resolveErr != nil || p.resolveDone { - return nil, p.resolveErr + return struct{}{}, p.resolveErr } defer func() { if !errors.Is(err, context.Canceled) { @@ -68,12 +68,12 @@ func (p *Puller) resolve(ctx context.Context, resolver remotes.Resolver) error { } ref, desc, err := resolver.Resolve(ctx, p.Src.String()) if err != nil { - return nil, err + return struct{}{}, err } p.desc = desc p.ref = ref p.resolveDone = true - return nil, nil + return struct{}{}, nil }) return err } @@ -233,15 +233,15 @@ func filterLayerBlobs(metadata map[digest.Digest]ocispecs.Descriptor, mu sync.Lo switch desc.MediaType { case ocispecs.MediaTypeImageLayer, - ocispecs.MediaTypeImageLayerNonDistributable, + ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerForeign, ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip, - ocispecs.MediaTypeImageLayerNonDistributableGzip, + ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. images.MediaTypeDockerSchema2LayerForeignGzip, ocispecs.MediaTypeImageLayerZstd, - ocispecs.MediaTypeImageLayerNonDistributableZstd: + ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
return nil, images.ErrSkipDesc default: if metadata != nil { diff --git a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go index 93c50106f7..ee8fcadb20 100644 --- a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go +++ b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/progress" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -33,13 +34,14 @@ func (p *ProviderWithProgress) ReaderAt(ctx context.Context, desc ocispecs.Descr ctx, cancel := context.WithCancel(ctx) doneCh := make(chan struct{}) go trackProgress(ctx, desc, p.Manager, doneCh) - return readerAtWithCancel{ReaderAt: ra, cancel: cancel, doneCh: doneCh}, nil + return readerAtWithCancel{ReaderAt: ra, cancel: cancel, doneCh: doneCh, logger: bklog.G(ctx)}, nil } type readerAtWithCancel struct { content.ReaderAt cancel func() doneCh <-chan struct{} + logger *logrus.Entry } func (ra readerAtWithCancel) Close() error { @@ -47,7 +49,7 @@ func (ra readerAtWithCancel) Close() error { select { case <-ra.doneCh: case <-time.After(time.Second): - logrus.Warn("timeout waiting for pull progress to complete") + ra.logger.Warn("timeout waiting for pull progress to complete") } return ra.ReaderAt.Close() } @@ -66,13 +68,14 @@ func (f *FetcherWithProgress) Fetch(ctx context.Context, desc ocispecs.Descripto ctx, cancel := context.WithCancel(ctx) doneCh := make(chan struct{}) go trackProgress(ctx, desc, f.Manager, doneCh) - return readerWithCancel{ReadCloser: rc, cancel: cancel, doneCh: doneCh}, nil + return readerWithCancel{ReadCloser: rc, cancel: cancel, doneCh: doneCh, logger: bklog.G(ctx)}, nil } type readerWithCancel struct { io.ReadCloser cancel func() doneCh <-chan struct{} + logger *logrus.Entry } func (r readerWithCancel) Close() error { @@ -80,7 +83,7 @@ func (r readerWithCancel) Close() error { select { case <-r.doneCh: case <-time.After(time.Second): - logrus.Warn("timeout waiting for pull progress to complete") + r.logger.Warn("timeout waiting for pull progress to complete") } return r.ReadCloser.Close() } @@ -90,10 +93,10 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa ticker := time.NewTicker(150 * time.Millisecond) defer ticker.Stop() - go func() { + go func(ctx context.Context) { <-ctx.Done() ticker.Stop() - }() + }(ctx) pw, _, _ := progress.NewFromContext(ctx) defer pw.Close() @@ -120,7 +123,7 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa }) continue } else if !errors.Is(err, errdefs.ErrNotFound) { - logrus.Errorf("unexpected error getting ingest status of %q: %v", ingestRef, err) + bklog.G(ctx).Errorf("unexpected error getting ingest status of %q: %v", ingestRef, err) return } diff --git a/vendor/github.com/moby/buildkit/util/purl/image.go b/vendor/github.com/moby/buildkit/util/purl/image.go index b3364ba4ce..9eb53f6840 100644 --- a/vendor/github.com/moby/buildkit/util/purl/image.go +++ b/vendor/github.com/moby/buildkit/util/purl/image.go @@ -14,7 +14,7 @@ import ( // RefToPURL converts an image reference with optional platform constraint to a package URL. 
// Image references are defined in https://github.com/distribution/distribution/blob/v2.8.1/reference/reference.go#L1 // Package URLs are defined in https://github.com/package-url/purl-spec -func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { +func RefToPURL(purlType string, ref string, platform *ocispecs.Platform) (string, error) { named, err := reference.ParseNormalizedNamed(ref) if err != nil { return "", errors.Wrapf(err, "failed to parse ref %q", ref) @@ -52,7 +52,7 @@ func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { }) } - p := packageurl.NewPackageURL("docker", ns, name, version, qualifiers, "") + p := packageurl.NewPackageURL(purlType, ns, name, version, qualifiers, "") return p.ToString(), nil } diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go index 881b2fd86f..bef56e5ba3 100644 --- a/vendor/github.com/moby/buildkit/util/push/push.go +++ b/vendor/github.com/moby/buildkit/util/push/push.go @@ -14,8 +14,10 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/attestation" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/progress" @@ -27,7 +29,6 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type pusher struct { @@ -46,6 +47,7 @@ func Pusher(ctx context.Context, resolver remotes.Resolver, ref string) (remotes } func Push(ctx context.Context, sm *session.Manager, sid string, provider content.Provider, manager content.Manager, dgst digest.Digest, ref string, insecure bool, hosts docker.RegistryHosts, byDigest bool, annotations map[digest.Digest]map[string]string) error { + ctx = contentutil.RegisterContentPayloadTypes(ctx) desc := ocispecs.Descriptor{ Digest: dgst, } @@ -250,11 +252,11 @@ func childrenHandler(provider content.Provider) images.HandlerFunc { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip, - attestation.MediaTypeDockerSchema2AttestationType: + intoto.PayloadType: // childless data types. 
return nil, nil default: - logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + bklog.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) } return descs, nil @@ -289,7 +291,7 @@ func updateDistributionSourceHandler(manager content.Manager, pushF images.Handl // update distribution source to layer if islayer { if _, err := updateF(ctx, desc); err != nil { - logrus.Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) + bklog.G(ctx).Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) } } return children, nil @@ -297,12 +299,12 @@ func updateDistributionSourceHandler(manager content.Manager, pushF images.Handl } func dedupeHandler(h images.HandlerFunc) images.HandlerFunc { - var g flightcontrol.Group + var g flightcontrol.Group[[]ocispecs.Descriptor] res := map[digest.Digest][]ocispecs.Descriptor{} var mu sync.Mutex return images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { - res, err := g.Do(ctx, desc.Digest.String(), func(ctx context.Context) (interface{}, error) { + return g.Do(ctx, desc.Digest.String(), func(ctx context.Context) ([]ocispecs.Descriptor, error) { mu.Lock() if r, ok := res[desc.Digest]; ok { mu.Unlock() @@ -320,12 +322,5 @@ func dedupeHandler(h images.HandlerFunc) images.HandlerFunc { mu.Unlock() return children, nil }) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return res.([]ocispecs.Descriptor), nil }) } diff --git a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go index d97d32dd6f..6c89cf7419 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go +++ b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go @@ -33,7 +33,7 @@ type authHandlerNS struct { hosts map[string][]docker.RegistryHost muHosts sync.Mutex sm *session.Manager - g flightcontrol.Group + g flightcontrol.Group[[]docker.RegistryHost] } func newAuthHandlerNS(sm *session.Manager) *authHandlerNS { @@ -230,7 +230,7 @@ type authResult struct { // authHandler is used to handle auth request per registry server. 
type authHandler struct { - g flightcontrol.Group + g flightcontrol.Group[*authResult] client *http.Client @@ -295,7 +295,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g // Docs: https://docs.docker.com/registry/spec/auth/scope scoped := strings.Join(to.Scopes, " ") - res, err := ah.g.Do(ctx, scoped, func(ctx context.Context) (interface{}, error) { + res, err := ah.g.Do(ctx, scoped, func(ctx context.Context) (*authResult, error) { ah.scopedTokensMu.Lock() r, exist := ah.scopedTokens[scoped] ah.scopedTokensMu.Unlock() @@ -313,15 +313,10 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g ah.scopedTokensMu.Unlock() return r, nil }) - if err != nil || res == nil { return "", err } - r := res.(*authResult) - if r == nil { - return "", nil - } - return r.token, nil + return res.token, nil } func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g session.Group, to auth.TokenOptions) (r *authResult, err error) { diff --git a/vendor/github.com/moby/buildkit/util/resolver/limited/group.go b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go index 7fdd947a02..934bd4f4eb 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/limited/group.go +++ b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go @@ -11,8 +11,8 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" + "github.com/moby/buildkit/util/bklog" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -119,7 +119,7 @@ func (f *fetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadC rcw := &readCloser{ReadCloser: rc} closer := func() { if !rcw.closed { - logrus.Warnf("fetcher not closed cleanly: %s", desc.Digest) + bklog.G(ctx).Warnf("fetcher not closed cleanly: %s", desc.Digest) } release() } diff --git a/vendor/github.com/moby/buildkit/util/resolver/pool.go b/vendor/github.com/moby/buildkit/util/resolver/pool.go index 292ca2e614..7b6a2ef50d 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/pool.go +++ b/vendor/github.com/moby/buildkit/util/resolver/pool.go @@ -131,7 +131,7 @@ type Resolver struct { // HostsFunc implements registry configuration of this Resolver func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { return func(domain string) ([]docker.RegistryHost, error) { - v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) (interface{}, error) { + v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) ([]docker.RegistryHost, error) { // long lock not needed because flightcontrol.Do r.handler.muHosts.Lock() v, ok := r.handler.hosts[domain] @@ -151,13 +151,12 @@ func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { if err != nil || v == nil { return nil, err } - vv := v.([]docker.RegistryHost) - if len(vv) == 0 { + if len(v) == 0 { return nil, nil } // make a copy so authorizer is set on unique instance - res := make([]docker.RegistryHost, len(vv)) - copy(res, vv) + res := make([]docker.RegistryHost, len(v)) + copy(res, v) auth := newDockerAuthorizer(res[0].Client, r.handler, r.sm, r.g) for i := range res { res[i].Authorizer = auth diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go index 0639a1b623..0abe597e08 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ 
b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -6,6 +6,7 @@ import ( "net" "net/http" "os" + "path" "path/filepath" "runtime" "strings" @@ -17,6 +18,10 @@ import ( "github.com/pkg/errors" ) +const ( + defaultPath = "/v2" +) + func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) { var hosts []docker.RegistryHost @@ -126,14 +131,7 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts var out []docker.RegistryHost for _, mirror := range c.Mirrors { - h := docker.RegistryHost{ - Scheme: "https", - Client: newDefaultClient(), - Host: mirror, - Path: "/v2", - Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, - } - + h := newMirrorRegistryHost(mirror) hosts, err := fillInsecureOpts(mirror, m[mirror], h) if err != nil { return nil, err @@ -169,6 +167,20 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts ) } +func newMirrorRegistryHost(mirror string) docker.RegistryHost { + mirrorHost, mirrorPath := extractMirrorHostAndPath(mirror) + path := path.Join(defaultPath, mirrorPath) + h := docker.RegistryHost{ + Scheme: "https", + Client: newDefaultClient(), + Host: mirrorHost, + Path: path, + Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, + } + + return h +} + func newDefaultClient() *http.Client { return &http.Client{ Transport: tracing.NewTransport(newDefaultTransport()), diff --git a/vendor/github.com/moby/buildkit/util/resolver/utils.go b/vendor/github.com/moby/buildkit/util/resolver/utils.go new file mode 100644 index 0000000000..cdcd5b83d6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/utils.go @@ -0,0 +1,22 @@ +package resolver + +import ( + "fmt" + "net/url" + "strings" +) + +func extractMirrorHostAndPath(mirror string) (string, string) { + var path string + host := mirror + + u, err := url.Parse(mirror) + if err != nil || u.Host == "" { + u, err = url.Parse(fmt.Sprintf("//%s", mirror)) + } + if err != nil || u.Host == "" { + return host, path + } + + return u.Host, strings.TrimRight(u.Path, "/") +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index 18d03630b4..fb9fc3ddf5 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/pkg/errors" ) diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index c4a73a68f4..43809d4876 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.11.4 // source: stack.proto diff --git a/vendor/github.com/moby/buildkit/util/system/path.go b/vendor/github.com/moby/buildkit/util/system/path.go index f6dc70dc8d..94f9a826f2 100644 --- a/vendor/github.com/moby/buildkit/util/system/path.go +++ b/vendor/github.com/moby/buildkit/util/system/path.go @@ -1,5 +1,13 @@ package system +import ( + "path" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + // DefaultPathEnvUnix is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . 
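The resolver.go and utils.go changes above let a configured registry mirror carry a path prefix: extractMirrorHostAndPath splits the mirror string into host and path, and newMirrorRegistryHost joins that path onto the default /v2 endpoint. A hedged, self-contained sketch of the same split (splitMirror is a hypothetical stand-in, not buildkit's API):

package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

// splitMirror mimics extractMirrorHostAndPath above: a mirror given without a
// scheme is re-parsed as a scheme-relative URL so the host:port and optional
// path prefix can be separated.
func splitMirror(mirror string) (host, prefix string) {
	u, err := url.Parse(mirror)
	if err != nil || u.Host == "" {
		u, err = url.Parse("//" + mirror)
	}
	if err != nil || u.Host == "" {
		return mirror, ""
	}
	return u.Host, strings.TrimRight(u.Path, "/")
}

func main() {
	for _, m := range []string{"mirror.example.com:5000", "mirror.example.com:5000/proxy/"} {
		host, prefix := splitMirror(m)
		// e.g. mirror.example.com:5000/proxy/ -> host "mirror.example.com:5000", path "/v2/proxy"
		fmt.Printf("%s -> host %q, path %q\n", m, host, path.Join("/v2", prefix))
	}
}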
@@ -16,3 +24,201 @@ func DefaultPathEnv(os string) string { } return DefaultPathEnvUnix } + +// NormalizePath cleans the path based on the operating system the path is meant for. +// It takes into account a potential parent path, and will join the path to the parent +// if the path is relative. Additionally, it will apply the folliwing rules: +// - always return an absolute path +// - always strip drive letters for Windows paths +// - optionally keep the trailing slashes on paths +// - paths are returned using forward slashes +func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + newPath = ToSlash(newPath, inputOS) + parent = ToSlash(parent, inputOS) + origPath := newPath + + if parent == "" { + parent = "/" + } + + var err error + parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if !IsAbs(parent, inputOS) { + parent = path.Join("/", parent) + } + + if newPath == "" { + // New workdir is empty. Use the "current" workdir. It should already + // be an absolute path. + newPath = parent + } + + newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if !IsAbs(newPath, inputOS) { + // The new WD is relative. Join it to the previous WD. + newPath = path.Join(parent, newPath) + } + + if keepSlash { + if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(newPath, "/") { + newPath += "/" + } else if strings.HasSuffix(origPath, "/.") { + if newPath != "/" { + newPath += "/" + } + newPath += "." + } + } + + return ToSlash(newPath, inputOS), nil +} + +func ToSlash(inputPath, inputOS string) string { + if inputOS != "windows" { + return inputPath + } + return strings.Replace(inputPath, "\\", "/", -1) +} + +func FromSlash(inputPath, inputOS string) string { + separator := "/" + if inputOS == "windows" { + separator = "\\" + } + return strings.Replace(inputPath, "/", separator, -1) +} + +// NormalizeWorkdir will return a normalized version of the new workdir, given +// the currently configured workdir and the desired new workdir. When setting a +// new relative workdir, it will be joined to the previous workdir or default to +// the root folder. +// On Windows we remove the drive letter and convert the path delimiter to "\". +// Paths that begin with os.PathSeparator are considered absolute even on Windows. +func NormalizeWorkdir(current, wd string, inputOS string) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + wd, err := NormalizePath(current, wd, inputOS, false) + if err != nil { + return "", errors.Wrap(err, "normalizing working directory") + } + + // Make sure we use the platform specific path separator. HCS does not like forward + // slashes in CWD. + return FromSlash(wd, inputOS), nil +} + +// IsAbs returns a boolean value indicating whether or not the path +// is absolute. On Linux, this is just a wrapper for filepath.IsAbs(). +// On Windows, we strip away the drive letter (if any), clean the path, +// and check whether or not the path starts with a filepath.Separator. +// This function is meant to check if a path is absolute, in the context +// of a COPY, ADD or WORKDIR, which have their root set in the mount point +// of the writable layer we are mutating. 
The filepath.IsAbs() function on +// Windows will not work in these scenatios, as it will return true for paths +// that: +// - Begin with drive letter (DOS style paths) +// - Are volume paths \\?\Volume{UUID} +// - Are UNC paths +func IsAbs(pth, inputOS string) bool { + if inputOS == "" { + inputOS = "linux" + } + cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS) + if err != nil { + return false + } + cleanedPath = ToSlash(cleanedPath, inputOS) + // We stripped any potential drive letter and converted any backslashes to + // forward slashes. We can safely use path.IsAbs() for both Windows and Linux. + return path.IsAbs(cleanedPath) +} + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// For linux, this is a no-op. +// +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. It also converts any backslash to forward slash. The conversion +// to OS specific separator should happen as late as possible (ie: before passing the +// value to the function that will actually use it). Paths are parsed and code paths are +// triggered starting with the client and all the way down to calling into the runtime +// environment. The client may run on a foreign OS from the one the build will be triggered +// (Windows clients connecting to Linux or vice versa). +// Keeping the file separator consistent until the last moment is desirable. +// +// We need the Windows path without the drive letter so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:somepath --> somepath // This is a relative path to the CWD set for that drive letter +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +// +// UNC paths can refer to multiple types of paths. From local filesystem paths, +// to remote filesystems like SMB or named pipes. +// There is no sane way to support this without adding a lot of complexity +// which I am not sure is worth it. +// \\.\C$\a --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + if inputOS != "windows" { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", errors.Errorf("No relative path specified in %q", path) + } + + // UNC paths should error out + if len(path) >= 2 && ToSlash(path[:2], inputOS) == "//" { + return "", errors.Errorf("UNC paths are not supported") + } + + parts := strings.SplitN(path, ":", 2) + // Path does not have a drive letter. Just return it. + if len(parts) < 2 { + return ToSlash(filepath.Clean(path), inputOS), nil + } + + // We expect all paths to be in C: + if !strings.EqualFold(parts[0], "c") { + return "", errors.New("The specified path is not on the system drive (C:)") + } + + // A path of the form F:somepath, is a path that is relative CWD set for a particular + // drive letter. See: + // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#fully-qualified-vs-relative-paths + // + // C:\>mkdir F:somepath + // C:\>dir F:\ + // Volume in drive F is New Volume + // Volume Serial Number is 86E5-AB64 + // + // Directory of F:\ + // + // 11/27/2022 02:22 PM somepath + // 0 File(s) 0 bytes + // 1 Dir(s) 1,052,876,800 bytes free + // + // We must return the second element of the split path, as is, without attempting to convert + // it to an absolute path. 
We have no knowledge of the CWD; that is treated elsewhere. + return ToSlash(filepath.Clean(parts[1]), inputOS), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go deleted file mode 100644 index ff01143eef..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go deleted file mode 100644 index cc7b664d8b..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build windows -// +build windows - -package system - -import ( - "path/filepath" - "strings" - - "github.com/pkg/errors" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", errors.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", errors.New("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go index 13e54bdefc..27c969180a 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go @@ -12,7 +12,7 @@ import ( "github.com/pkg/errors" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -79,12 +79,6 @@ func getExporter() (sdktrace.SpanExporter, error) { return nil, err } - if exp != nil { - exp = &threadSafeExporterWrapper{ - exporter: exp, - } - } - if Recorder != nil { Recorder.SpanExporter = exp exp = Recorder diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go b/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go deleted file mode 100644 index 51d14448df..0000000000 --- a/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go +++ /dev/null @@ -1,26 +0,0 @@ -package detect - -import ( - "context" - "sync" - - sdktrace "go.opentelemetry.io/otel/sdk/trace" -) - -// threadSafeExporterWrapper wraps an OpenTelemetry SpanExporter and makes it thread-safe. 
-type threadSafeExporterWrapper struct { - mu sync.Mutex - exporter sdktrace.SpanExporter -} - -func (tse *threadSafeExporterWrapper) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { - tse.mu.Lock() - defer tse.mu.Unlock() - return tse.exporter.ExportSpans(ctx, spans) -} - -func (tse *threadSafeExporterWrapper) Shutdown(ctx context.Context) error { - tse.mu.Lock() - defer tse.mu.Unlock() - return tse.exporter.Shutdown(ctx) -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go index fd7f0ba7d5..97f538f575 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/tracing.go +++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go index 216a364c63..6a965f0e86 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go @@ -6,12 +6,12 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" ) -func instrumentationLibrary(il *commonpb.InstrumentationLibrary) instrumentation.Library { - if il == nil { - return instrumentation.Library{} +func instrumentationScope(is *commonpb.InstrumentationScope) instrumentation.Scope { + if is == nil { + return instrumentation.Scope{} } - return instrumentation.Library{ - Name: il.Name, - Version: il.Version, + return instrumentation.Scope{ + Name: is.Name, + Version: is.Version, } } diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/span.go b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go index f07d0c98e9..9f7924c4a7 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/transform/span.go +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go @@ -31,12 +31,12 @@ func Spans(sdl []*tracepb.ResourceSpans) []tracesdk.ReadOnlySpan { continue } - for _, sdi := range sd.InstrumentationLibrarySpans { + for _, sdi := range sd.ScopeSpans { sda := make([]tracesdk.ReadOnlySpan, len(sdi.Spans)) for i, s := range sdi.Spans { sda[i] = &readOnlySpan{ pb: s, - il: sdi.InstrumentationLibrary, + is: sdi.Scope, resource: sd.Resource, schemaURL: sd.SchemaUrl, } @@ -53,7 +53,7 @@ type readOnlySpan struct { tracesdk.ReadOnlySpan pb *tracepb.Span - il *v11.InstrumentationLibrary + is *v11.InstrumentationScope resource *v1.Resource schemaURL string } @@ -122,8 +122,13 @@ func (s *readOnlySpan) Status() tracesdk.Status { } } +func (s *readOnlySpan) InstrumentationScope() instrumentation.Scope { + return instrumentationScope(s.is) +} + +// Deprecated: use InstrumentationScope. func (s *readOnlySpan) InstrumentationLibrary() instrumentation.Library { - return instrumentationLibrary(s.il) + return s.InstrumentationScope() } // Resource returns information about the entity that produced the span. 
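The winlayers/applier.go hunk that follows rewrites the legacy Docker zstd layer media type to the OCI one before applying, since containerd does not recognize the Docker spelling while the two formats are byte-compatible. A small sketch of that mapping, with the constants written out for illustration (the real code compares images.MediaTypeDockerSchema2Layer+".zstd" against ocispecs.MediaTypeImageLayerZstd):

package main

import "fmt"

const (
	dockerZstdLayer = "application/vnd.docker.image.rootfs.diff.tar.zstd"
	ociZstdLayer    = "application/vnd.oci.image.layer.v1.tar+zstd"
)

// normalizeLayerMediaType maps the unregistered Docker zstd layer type to the
// compatible OCI type and leaves every other media type untouched.
func normalizeLayerMediaType(mt string) string {
	if mt == dockerZstdLayer {
		return ociZstdLayer
	}
	return mt
}

func main() {
	fmt.Println(normalizeLayerMediaType(dockerZstdLayer)) // application/vnd.oci.image.layer.v1.tar+zstd
}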
diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go index f2b147d674..e415a5e876 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/applier.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go @@ -37,6 +37,12 @@ type winApplier struct { } func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + // HACK:, containerd doesn't know about vnd.docker.image.rootfs.diff.tar.zstd, but that + // media type is compatible w/ the oci type, so just lie and say it's the oci type + if desc.MediaType == images.MediaTypeDockerSchema2Layer+".zstd" { + desc.MediaType = ocispecs.MediaTypeImageLayerZstd + } + if !hasWindowsLayerMode(ctx) { return s.apply(ctx, desc, mounts, opts...) } diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go index fe2b1c2161..effe0c16cb 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/differ.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go @@ -15,12 +15,10 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" + log "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - log "github.com/moby/buildkit/util/bklog" ) const ( @@ -109,7 +107,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt if err != nil { return errors.Wrap(err, "failed to get compressed stream") } - w, discard, done := makeWindowsLayer(io.MultiWriter(compressed, dgstr.Hash())) + w, discard, done := makeWindowsLayer(ctx, io.MultiWriter(compressed, dgstr.Hash())) err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) if err != nil { discard(err) @@ -125,7 +123,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt } config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() } else { - w, discard, done := makeWindowsLayer(cw) + w, discard, done := makeWindowsLayer(ctx, cw) if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil { discard(err) return errors.Wrap(err, "failed to write diff") @@ -203,7 +201,7 @@ func addSecurityDescriptor(h *tar.Header) { } } -func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { +func makeWindowsLayer(ctx context.Context, w io.Writer) (io.Writer, func(error), chan error) { pr, pw := io.Pipe() done := make(chan error) @@ -259,7 +257,7 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { return tarWriter.Close() }() if err != nil { - logrus.Errorf("makeWindowsLayer %+v", err) + log.G(ctx).Errorf("makeWindowsLayer %+v", err) } pw.CloseWithError(err) done <- err diff --git a/vendor/github.com/moby/buildkit/version/ua.go b/vendor/github.com/moby/buildkit/version/ua.go new file mode 100644 index 0000000000..01cfe67cd0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/version/ua.go @@ -0,0 +1,49 @@ +package version + +import ( + "fmt" + "regexp" + "strings" + "sync" +) + +var ( + reRelease *regexp.Regexp + reDev *regexp.Regexp + reOnce sync.Once + uapCbs map[string]func() string +) + +func UserAgent() string { + uaVersion := defaultVersion + + reOnce.Do(func() { + reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`) + reDev = 
regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`) + }) + + if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + } else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + "-dev" + } + + res := &strings.Builder{} + fmt.Fprintf(res, "buildkit/%s", uaVersion) + for pname, pver := range uapCbs { + fmt.Fprintf(res, " %s/%s", pname, pver()) + } + + return res.String() +} + +// SetUserAgentProduct sets a callback to get the version of a product to be +// included in the User-Agent header. The callback is called every time the +// User-Agent header is generated. Caller must ensure that the callback is +// cached if it is expensive to compute. +func SetUserAgentProduct(name string, cb func() (version string)) { + if uapCbs == nil { + uapCbs = make(map[string]func() string) + } + uapCbs[name] = cb +} diff --git a/vendor/github.com/moby/buildkit/version/version.go b/vendor/github.com/moby/buildkit/version/version.go index 49640f0f86..9cddea63c0 100644 --- a/vendor/github.com/moby/buildkit/version/version.go +++ b/vendor/github.com/moby/buildkit/version/version.go @@ -17,13 +17,8 @@ package version -import ( - "regexp" - "sync" -) - const ( - defaultVersion = "0.0.0+unknown" + defaultVersion = "v0.0.0+unknown" ) var ( @@ -37,26 +32,3 @@ var ( // the program at linking time. Revision = "" ) - -var ( - reRelease *regexp.Regexp - reDev *regexp.Regexp - reOnce sync.Once -) - -func UserAgent() string { - uaVersion := defaultVersion - - reOnce.Do(func() { - reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`) - reDev = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`) - }) - - if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 { - uaVersion = matches[0][1] - } else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 { - uaVersion = matches[0][1] + "-dev" - } - - return "buildkit/" + uaVersion -} diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go index 2c3e4defd1..8d402ff2fb 100644 --- a/vendor/github.com/moby/buildkit/worker/base/worker.go +++ b/vendor/github.com/moby/buildkit/worker/base/worker.go @@ -11,7 +11,6 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/idtools" @@ -21,6 +20,7 @@ import ( "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter" imageexporter "github.com/moby/buildkit/exporter/containerimage" localexporter "github.com/moby/buildkit/exporter/local" @@ -30,6 +30,7 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/snapshot/imagerefchecker" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver/mounts" @@ -42,6 +43,7 @@ import ( "github.com/moby/buildkit/source/local" "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/progress" 
"github.com/moby/buildkit/util/progress/controller" @@ -67,17 +69,18 @@ type WorkerOpt struct { NetworkProviders map[pb.NetMode]network.Provider Executor executor.Executor Snapshotter snapshot.Snapshotter - ContentStore content.Store + ContentStore *containerdsnapshot.Store Applier diff.Applier Differ diff.Comparer ImageStore images.Store // optional RegistryHosts docker.RegistryHosts IdentityMapping *idtools.IdentityMapping - LeaseManager leases.Manager + LeaseManager *leaseutil.Manager GarbageCollect func(context.Context) (gc.Stats, error) ParallelismSem *semaphore.Weighted MetadataStore *metadata.Store MountPoolRoot string + ResourceMonitor *resources.Monitor } // Worker is a local worker instance with dedicated snapshotter, cache, and so on. @@ -213,14 +216,19 @@ func (w *Worker) Close() error { rerr = multierror.Append(rerr, err) } } + if w.ResourceMonitor != nil { + if err := w.ResourceMonitor.Close(); err != nil { + rerr = multierror.Append(rerr, err) + } + } return rerr } -func (w *Worker) ContentStore() content.Store { +func (w *Worker) ContentStore() *containerdsnapshot.Store { return w.WorkerOpt.ContentStore } -func (w *Worker) LeaseManager() leases.Manager { +func (w *Worker) LeaseManager() *leaseutil.Manager { return w.WorkerOpt.LeaseManager } @@ -354,7 +362,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { return nil } -func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { // is this an registry source? Or an OCI layout source? 
switch opt.ResolverType { case llb.ResolverTypeOCILayout: diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go index a635a53502..50f7c93688 100644 --- a/vendor/github.com/moby/buildkit/worker/cacheresult.go +++ b/vendor/github.com/moby/buildkit/worker/cacheresult.go @@ -95,8 +95,8 @@ func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheRe } return remotes, nil } -func (s *cacheResultStorage) Exists(id string) bool { - ref, err := s.load(context.TODO(), id, true) +func (s *cacheResultStorage) Exists(ctx context.Context, id string) bool { + ref, err := s.load(ctx, id, true) if err != nil { return false } diff --git a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go index a829d45757..e8d948d0e8 100644 --- a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go +++ b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go @@ -10,7 +10,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/leases" - gogoptypes "github.com/gogo/protobuf/types" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/executor/containerdexecutor" @@ -53,7 +53,7 @@ func newContainerd(root string, client *containerd.Client, snapshotterName, ns s return base.WorkerOpt{}, err } - serverInfo, err := client.IntrospectionService().Server(context.TODO(), &gogoptypes.Empty{}) + serverInfo, err := client.IntrospectionService().Server(context.TODO(), &ptypes.Empty{}) if err != nil { return base.WorkerOpt{}, err } diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index 2f426e9ead..d62047e9fb 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -4,8 +4,6 @@ import ( "context" "io" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/leases" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" @@ -13,7 +11,9 @@ import ( "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/leaseutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -30,16 +30,16 @@ type Worker interface { LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) // ResolveOp resolves Vertex.Sys() to Op implementation. 
ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) PruneCacheMounts(ctx context.Context, ids []string) error - ContentStore() content.Store + ContentStore() *containerdsnapshot.Store Executor() executor.Executor CacheManager() cache.Manager - LeaseManager() leases.Manager + LeaseManager() *leaseutil.Manager } type Infos interface { diff --git a/vendor/github.com/spdx/tools-golang/convert/chain.go b/vendor/github.com/spdx/tools-golang/convert/chain.go new file mode 100644 index 0000000000..ac96733c1b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/convert/chain.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package convert + +import ( + "fmt" + "reflect" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/common" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +var DocumentChain = converter.NewChain( + v2_1.Document{}, + v2_2.Document{}, + v2_3.Document{}, +) + +// Document converts from one document to another document +// For example, converting a document to the latest version could be done like: +// +// sourceDoc := // e.g. 
a v2_2.Document from somewhere +// var targetDoc spdx.Document // this can be any document version +// err := convert.Document(sourceDoc, &targetDoc) // the target must be passed as a pointer +func Document(from common.AnyDocument, to common.AnyDocument) error { + if !IsPtr(to) { + return fmt.Errorf("struct to convert to must be a pointer") + } + from = FromPtr(from) + if reflect.TypeOf(from) == reflect.TypeOf(FromPtr(to)) { + reflect.ValueOf(to).Elem().Set(reflect.ValueOf(from)) + return nil + } + return DocumentChain.Convert(from, to) +} diff --git a/vendor/github.com/spdx/tools-golang/convert/struct.go b/vendor/github.com/spdx/tools-golang/convert/struct.go new file mode 100644 index 0000000000..7223dbdbd4 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/convert/struct.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package convert + +import ( + "fmt" + "reflect" + + "github.com/spdx/tools-golang/spdx/common" +) + +// FromPtr accepts a document or a document pointer and returns the direct struct reference +func FromPtr(doc common.AnyDocument) common.AnyDocument { + value := reflect.ValueOf(doc) + for value.Type().Kind() == reflect.Ptr { + value = value.Elem() + } + return value.Interface() +} + +func IsPtr(obj common.AnyDocument) bool { + t := reflect.TypeOf(obj) + if t.Kind() == reflect.Interface { + t = t.Elem() + } + return t.Kind() == reflect.Ptr +} + +func Describe(o interface{}) string { + value := reflect.ValueOf(o) + typ := value.Type() + prefix := "" + for typ.Kind() == reflect.Ptr { + prefix += "*" + value = value.Elem() + typ = value.Type() + } + str := limit(fmt.Sprintf("%+v", value.Interface()), 300) + name := fmt.Sprintf("%s.%s%s", typ.PkgPath(), prefix, typ.Name()) + return fmt.Sprintf("%s: %s", name, str) +} + +func limit(text string, length int) string { + if length <= 0 || len(text) <= length+3 { + return text + } + r := []rune(text) + r = r[:length] + return string(r) + "..." +} diff --git a/vendor/github.com/spdx/tools-golang/json/parser.go b/vendor/github.com/spdx/tools-golang/json/parser.go deleted file mode 100644 index ee7915de0f..0000000000 --- a/vendor/github.com/spdx/tools-golang/json/parser.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later - -package spdx_json - -import ( - "bytes" - "encoding/json" - "io" - - "github.com/spdx/tools-golang/spdx/v2_2" - "github.com/spdx/tools-golang/spdx/v2_3" -) - -// Load2_2 takes in an io.Reader and returns an SPDX document. -func Load2_2(content io.Reader) (*v2_2.Document, error) { - // convert io.Reader to a slice of bytes and call the parser - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(content) - if err != nil { - return nil, err - } - - var doc v2_2.Document - err = json.Unmarshal(buf.Bytes(), &doc) - if err != nil { - return nil, err - } - - return &doc, nil -} - -// Load2_3 takes in an io.Reader and returns an SPDX document. 
-func Load2_3(content io.Reader) (*v2_3.Document, error) { - // convert io.Reader to a slice of bytes and call the parser - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(content) - if err != nil { - return nil, err - } - - var doc v2_3.Document - err = json.Unmarshal(buf.Bytes(), &doc) - if err != nil { - return nil, err - } - - return &doc, nil -} diff --git a/vendor/github.com/spdx/tools-golang/json/reader.go b/vendor/github.com/spdx/tools-golang/json/reader.go new file mode 100644 index 0000000000..f1a0b989af --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/json/reader.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/spdx/tools-golang/convert" + "github.com/spdx/tools-golang/spdx" + "github.com/spdx/tools-golang/spdx/common" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +// Read takes an io.Reader and returns a fully-parsed current model SPDX Document +// or an error if any error is encountered. +func Read(content io.Reader) (*spdx.Document, error) { + doc := spdx.Document{} + err := ReadInto(content, &doc) + return &doc, err +} + +// ReadInto takes an io.Reader, reads in the SPDX document at the version provided +// and converts to the doc version +func ReadInto(content io.Reader, doc common.AnyDocument) error { + if !convert.IsPtr(doc) { + return fmt.Errorf("doc to read into must be a pointer") + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(content) + if err != nil { + return err + } + + var data interface{} + err = json.Unmarshal(buf.Bytes(), &data) + if err != nil { + return err + } + + val, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("not a valid SPDX JSON document") + } + + version, ok := val["spdxVersion"] + if !ok { + return fmt.Errorf("JSON document does not contain spdxVersion field") + } + + switch version { + case v2_1.Version: + var doc v2_1.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + case v2_2.Version: + var doc v2_2.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + case v2_3.Version: + var doc v2_3.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + default: + return fmt.Errorf("unsupported SDPX version: %s", version) + } + + return convert.Document(data, doc) +} diff --git a/vendor/github.com/spdx/tools-golang/json/writer.go b/vendor/github.com/spdx/tools-golang/json/writer.go index 8f2b94dc60..a944dccb9e 100644 --- a/vendor/github.com/spdx/tools-golang/json/writer.go +++ b/vendor/github.com/spdx/tools-golang/json/writer.go @@ -1,41 +1,33 @@ // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package spdx_json +package json import ( "encoding/json" - "github.com/spdx/tools-golang/spdx/v2_3" "io" - "github.com/spdx/tools-golang/spdx/v2_2" + "github.com/spdx/tools-golang/spdx/common" ) -// Save2_2 takes an SPDX Document (version 2.2) and an io.Writer, and writes the document to the writer in JSON format. 
-func Save2_2(doc *v2_2.Document, w io.Writer) error { - buf, err := json.Marshal(doc) - if err != nil { - return err - } +type WriteOption func(*json.Encoder) - _, err = w.Write(buf) - if err != nil { - return err +func Indent(indent string) WriteOption { + return func(e *json.Encoder) { + e.SetIndent("", indent) } - - return nil } -// Save2_3 takes an SPDX Document (version 2.2) and an io.Writer, and writes the document to the writer in JSON format. -func Save2_3(doc *v2_3.Document, w io.Writer) error { - buf, err := json.Marshal(doc) - if err != nil { - return err +func EscapeHTML(escape bool) WriteOption { + return func(e *json.Encoder) { + e.SetEscapeHTML(escape) } - - _, err = w.Write(buf) - if err != nil { - return err - } - - return nil +} + +// Write takes an SPDX Document and an io.Writer, and writes the document to the writer in JSON format. +func Write(doc common.AnyDocument, w io.Writer, opts ...WriteOption) error { + e := json.NewEncoder(w) + for _, opt := range opts { + opt(e) + } + return e.Encode(doc) } diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/types.go b/vendor/github.com/spdx/tools-golang/spdx/common/types.go new file mode 100644 index 0000000000..059d62f22b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/types.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +// AnyDocument a placeholder for allowing any SPDX document to be used in function args +type AnyDocument interface{} diff --git a/vendor/github.com/spdx/tools-golang/spdx/model.go b/vendor/github.com/spdx/tools-golang/spdx/model.go new file mode 100644 index 0000000000..e91856b0e5 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/model.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +// Package spdx contains references to the latest spdx version +package spdx + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" + latest "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +const ( + Version = latest.Version + DataLicense = latest.DataLicense +) + +type ( + Annotation = latest.Annotation + ArtifactOfProject = latest.ArtifactOfProject + CreationInfo = latest.CreationInfo + Document = latest.Document + ExternalDocumentRef = latest.ExternalDocumentRef + File = latest.File + OtherLicense = latest.OtherLicense + Package = latest.Package + PackageExternalReference = latest.PackageExternalReference + Relationship = latest.Relationship + Review = latest.Review + Snippet = latest.Snippet +) + +type ( + Annotator = common.Annotator + Checksum = common.Checksum + ChecksumAlgorithm = common.ChecksumAlgorithm + Creator = common.Creator + DocElementID = common.DocElementID + ElementID = common.ElementID + Originator = common.Originator + PackageVerificationCode = common.PackageVerificationCode + SnippetRange = common.SnippetRange + SnippetRangePointer = common.SnippetRangePointer + Supplier = common.Supplier +) + +const ( + SHA224 = common.SHA224 + SHA1 = common.SHA1 + SHA256 = common.SHA256 + SHA384 = common.SHA384 + SHA512 = common.SHA512 + MD2 = common.MD2 + MD4 = common.MD4 + MD5 = common.MD5 + MD6 = common.MD6 + SHA3_256 = common.SHA3_256 + SHA3_384 = common.SHA3_384 + SHA3_512 = common.SHA3_512 + BLAKE2b_256 = common.BLAKE2b_256 + BLAKE2b_384 = common.BLAKE2b_384 + BLAKE2b_512 = common.BLAKE2b_512 + BLAKE3 = common.BLAKE3 + ADLER32 = common.ADLER32 +) + +const ( + // F.2 Security types + CategorySecurity = common.CategorySecurity + SecurityCPE23Type = common.TypeSecurityCPE23Type + 
SecurityCPE22Type = common.TypeSecurityCPE22Type + SecurityAdvisory = common.TypeSecurityAdvisory + SecurityFix = common.TypeSecurityFix + SecurityUrl = common.TypeSecurityUrl + SecuritySwid = common.TypeSecuritySwid + + // F.3 Package-Manager types + CategoryPackageManager = common.CategoryPackageManager + PackageManagerMavenCentral = common.TypePackageManagerMavenCentral + PackageManagerNpm = common.TypePackageManagerNpm + PackageManagerNuGet = common.TypePackageManagerNuGet + PackageManagerBower = common.TypePackageManagerBower + PackageManagerPURL = common.TypePackageManagerPURL + + // F.4 Persistent-Id types + CategoryPersistentId = common.CategoryPersistentId + TypePersistentIdSwh = common.TypePersistentIdSwh + TypePersistentIdGitoid = common.TypePersistentIdGitoid + + // 11.1 Relationship field types + RelationshipDescribes = common.TypeRelationshipDescribe + RelationshipDescribedBy = common.TypeRelationshipDescribeBy + RelationshipContains = common.TypeRelationshipContains + RelationshipContainedBy = common.TypeRelationshipContainedBy + RelationshipDependsOn = common.TypeRelationshipDependsOn + RelationshipDependencyOf = common.TypeRelationshipDependencyOf + RelationshipBuildDependencyOf = common.TypeRelationshipBuildDependencyOf + RelationshipDevDependencyOf = common.TypeRelationshipDevDependencyOf + RelationshipOptionalDependencyOf = common.TypeRelationshipOptionalDependencyOf + RelationshipProvidedDependencyOf = common.TypeRelationshipProvidedDependencyOf + RelationshipTestDependencyOf = common.TypeRelationshipTestDependencyOf + RelationshipRuntimeDependencyOf = common.TypeRelationshipRuntimeDependencyOf + RelationshipExampleOf = common.TypeRelationshipExampleOf + RelationshipGenerates = common.TypeRelationshipGenerates + RelationshipGeneratedFrom = common.TypeRelationshipGeneratedFrom + RelationshipAncestorOf = common.TypeRelationshipAncestorOf + RelationshipDescendantOf = common.TypeRelationshipDescendantOf + RelationshipVariantOf = common.TypeRelationshipVariantOf + RelationshipDistributionArtifact = common.TypeRelationshipDistributionArtifact + RelationshipPatchFor = common.TypeRelationshipPatchFor + RelationshipPatchApplied = common.TypeRelationshipPatchApplied + RelationshipCopyOf = common.TypeRelationshipCopyOf + RelationshipFileAdded = common.TypeRelationshipFileAdded + RelationshipFileDeleted = common.TypeRelationshipFileDeleted + RelationshipFileModified = common.TypeRelationshipFileModified + RelationshipExpandedFromArchive = common.TypeRelationshipExpandedFromArchive + RelationshipDynamicLink = common.TypeRelationshipDynamicLink + RelationshipStaticLink = common.TypeRelationshipStaticLink + RelationshipDataFileOf = common.TypeRelationshipDataFileOf + RelationshipTestCaseOf = common.TypeRelationshipTestCaseOf + RelationshipBuildToolOf = common.TypeRelationshipBuildToolOf + RelationshipDevToolOf = common.TypeRelationshipDevToolOf + RelationshipTestOf = common.TypeRelationshipTestOf + RelationshipTestToolOf = common.TypeRelationshipTestToolOf + RelationshipDocumentationOf = common.TypeRelationshipDocumentationOf + RelationshipOptionalComponentOf = common.TypeRelationshipOptionalComponentOf + RelationshipMetafileOf = common.TypeRelationshipMetafileOf + RelationshipPackageOf = common.TypeRelationshipPackageOf + RelationshipAmends = common.TypeRelationshipAmends + RelationshipPrerequisiteFor = common.TypeRelationshipPrerequisiteFor + RelationshipHasPrerequisite = common.TypeRelationshipHasPrerequisite + RelationshipRequirementDescriptionFor = 
common.TypeRelationshipRequirementDescriptionFor + RelationshipSpecificationFor = common.TypeRelationshipSpecificationFor + RelationshipOther = common.TypeRelationshipOther +) diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go similarity index 91% rename from vendor/github.com/spdx/tools-golang/spdx/common/checksum.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go index aa2ae52ff1..d4969ef846 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go @@ -5,7 +5,7 @@ package common // ChecksumAlgorithm represents the algorithm used to generate the file checksum in the Checksum struct. type ChecksumAlgorithm string -// The checksum algorithms mentioned in the spdxv2.2.0 https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum +// The checksum algorithms mentioned in the spec https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum const ( SHA224 ChecksumAlgorithm = "SHA224" SHA1 ChecksumAlgorithm = "SHA1" diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/external.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go similarity index 93% rename from vendor/github.com/spdx/tools-golang/spdx/common/external.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go index 59c3f0f03f..8344ac6162 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/common/external.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go @@ -4,7 +4,9 @@ package common // Constants for various string types const ( + // F.2 Security types + CategorySecurity string = "SECURITY" TypeSecurityCPE23Type string = "cpe23Type" TypeSecurityCPE22Type string = "cpe22Type" TypeSecurityAdvisory string = "advisory" @@ -13,11 +15,16 @@ const ( TypeSecuritySwid string = "swid" // F.3 Package-Manager types + CategoryPackageManager string = "PACKAGE-MANAGER" TypePackageManagerMavenCentral string = "maven-central" TypePackageManagerNpm string = "npm" TypePackageManagerNuGet string = "nuget" TypePackageManagerBower string = "bower" TypePackageManagerPURL string = "purl" + // F.4 Persistent-Id types + CategoryPersistentId string = "PERSISTENT-ID" + TypePersistentIdSwh string = "swh" + TypePersistentIdGitoid string = "gitoid" // 11.1 Relationship field types TypeRelationshipDescribe string = "DESCRIBES" diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/identifier.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/package.go 
b/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/snippet.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/snippet.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go new file mode 100644 index 0000000000..c80f64cfde --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Annotation is an Annotation section of an SPDX Document for version 2.1 of the spec. +type Annotation struct { + // 8.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 8.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 8.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 8.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. + AnnotationSPDXIdentifier common.DocElementID `json:"-"` + + // 8.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go new file mode 100644 index 0000000000..c75e8ea810 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.1 of the spec. 
+type CreationInfo struct { + // 2.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion,omitempty"` + + // 2.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 2.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 2.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go similarity index 69% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go index 31ac08b6c7..60a27c44d8 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go @@ -1,12 +1,19 @@ // Package spdx contains the struct definition for an SPDX Document // and its constituent parts. // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package v2_2 +package v2_1 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.1" +const DataLicense = "CC0-1.0" // ExternalDocumentRef is a reference to an external SPDX document -// as defined in section 6.6 for version 2.2 of the spec. +// as defined in section 2.6 for version 2.1 of the spec. type ExternalDocumentRef struct { // DocumentRefID is the ID string defined in the start of the // reference. It should _not_ contain the "DocumentRef-" part @@ -20,35 +27,35 @@ type ExternalDocumentRef struct { Checksum common.Checksum `json:"checksum"` } -// Document is an SPDX Document for version 2.2 of the spec. -// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +// Document is an SPDX Document for version 2.1 of the spec. 
+// See https://spdx.org/sites/cpstandard/files/pages/files/spdxversion2.1.pdf type Document struct { - // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // 2.1: SPDX Version; should be in the format "SPDX-2.1" // Cardinality: mandatory, one SPDXVersion string `json:"spdxVersion"` - // 6.2: Data License; should be "CC0-1.0" + // 2.2: Data License; should be "CC0-1.0" // Cardinality: mandatory, one DataLicense string `json:"dataLicense"` - // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // 2.3: SPDX Identifier; should be "DOCUMENT" to represent // mandatory identifier of SPDXRef-DOCUMENT // Cardinality: mandatory, one SPDXIdentifier common.ElementID `json:"SPDXID"` - // 6.4: Document Name + // 2.4: Document Name // Cardinality: mandatory, one DocumentName string `json:"name"` - // 6.5: Document Namespace + // 2.5: Document Namespace // Cardinality: mandatory, one DocumentNamespace string `json:"documentNamespace"` - // 6.6: External Document References + // 2.6: External Document References // Cardinality: optional, one or many ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` - // 6.11: Document Comment + // 2.11: Document Comment // Cardinality: optional, one DocumentComment string `json:"comment,omitempty"` @@ -63,3 +70,10 @@ type Document struct { // DEPRECATED in version 2.0 of spec Reviews []*Review `json:"-"` } + +func (d *Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go new file mode 100644 index 0000000000..50bdcf1a22 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// File is a File section of an SPDX Document for version 2.1 of the spec. 
+type File struct { + // 4.1: File Name + // Cardinality: mandatory, one + FileName string `json:"fileName"` + + // 4.2: File SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + FileSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 4.3: File Types + // Cardinality: optional, multiple + FileTypes []string `json:"fileTypes,omitempty"` + + // 4.4: File Checksum: may have keys for SHA1, SHA256 and/or MD5 + // Cardinality: mandatory, one SHA1, others may be optionally provided + Checksums []common.Checksum `json:"checksums"` + + // 4.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + LicenseConcluded string `json:"licenseConcluded"` + + // 4.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many + LicenseInfoInFiles []string `json:"licenseInfoInFiles"` + + // 4.7: Comments on License + // Cardinality: optional, one + LicenseComments string `json:"licenseComments,omitempty"` + + // 4.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + FileCopyrightText string `json:"copyrightText"` + + // DEPRECATED in version 2.1 of spec + // 4.9-4.11: Artifact of Project variables (defined below) + // Cardinality: optional, one or many + ArtifactOfProjects []*ArtifactOfProject `json:"-"` + + // 4.12: File Comment + // Cardinality: optional, one + FileComment string `json:"comment,omitempty"` + + // 4.13: File Notice + // Cardinality: optional, one + FileNotice string `json:"noticeText,omitempty"` + + // 4.14: File Contributor + // Cardinality: optional, one or many + FileContributors []string `json:"fileContributors,omitempty"` + + // DEPRECATED in version 2.0 of spec + // 4.15: File Dependencies + // Cardinality: optional, one or many + FileDependencies []string `json:"-"` + + // Snippets contained in this File + // Note that Snippets could be defined in a different Document! However, + // the only ones that _THIS_ document can contain are the ones that are + // defined here -- so this should just be an ElementID. + Snippets map[common.ElementID]*Snippet `json:"-"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// ArtifactOfProject is a DEPRECATED collection of data regarding +// a Package, as defined in sections 4.9-4.11 in version 2.1 of the spec. +type ArtifactOfProject struct { + + // DEPRECATED in version 2.1 of spec + // 4.9: Artifact of Project Name + // Cardinality: conditional, required if present, one per AOP + Name string + + // DEPRECATED in version 2.1 of spec + // 4.10: Artifact of Project Homepage: URL or "UNKNOWN" + // Cardinality: optional, one per AOP + HomePage string + + // DEPRECATED in version 2.1 of spec + // 4.11: Artifact of Project Uniform Resource Identifier + // Cardinality: optional, one per AOP + URI string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go new file mode 100644 index 0000000000..6ae09feb6f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +// OtherLicense is an Other License Information section of an +// SPDX Document for version 2.1 of the spec. 
+type OtherLicense struct { + // 6.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 6.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 6.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 6.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 6.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go new file mode 100644 index 0000000000..9800c2c23b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Package is a Package section of an SPDX Document for version 2.1 of the spec. +type Package struct { + // 3.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 3.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 3.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 3.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 3.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 3.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 3.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 3.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? 
+ IsFilesAnalyzedTagPresent bool `json:"-"` + + // 3.9: Package Verification Code + PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` + + // 3.10: Package Checksum: may have keys for SHA1, SHA256 and/or MD5 + // Cardinality: optional, one or many + PackageChecksums []common.Checksum `json:"checksums,omitempty"` + + // 3.11: Package Home Page + // Cardinality: optional, one + PackageHomePage string `json:"homepage,omitempty"` + + // 3.12: Source Information + // Cardinality: optional, one + PackageSourceInfo string `json:"sourceInfo,omitempty"` + + // 3.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseConcluded string `json:"licenseConcluded"` + + // 3.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"` + + // 3.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseDeclared string `json:"licenseDeclared"` + + // 3.16: Comments on License + // Cardinality: optional, one + PackageLicenseComments string `json:"licenseComments,omitempty"` + + // 3.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageCopyrightText string `json:"copyrightText"` + + // 3.18: Package Summary Description + // Cardinality: optional, one + PackageSummary string `json:"summary,omitempty"` + + // 3.19: Package Detailed Description + // Cardinality: optional, one + PackageDescription string `json:"description,omitempty"` + + // 3.20: Package Comment + // Cardinality: optional, one + PackageComment string `json:"comment,omitempty"` + + // 3.21: Package External Reference + // Cardinality: optional, one or many + PackageExternalReferences []*PackageExternalReference `json:"externalRefs,omitempty"` + + // Files contained in this Package + Files []*File `json:"files,omitempty"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// PackageExternalReference is an External Reference to additional info +// about a Package, as defined in section 3.21 in version 2.1 of the spec. +type PackageExternalReference struct { + // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" + Category string `json:"referenceCategory"` + + // type is an [idstring] as defined in Appendix VI; + // called RefType here due to "type" being a Golang keyword + RefType string `json:"referenceType"` + + // locator is a unique string to access the package-specific + // info, metadata or content within the target location + Locator string `json:"referenceLocator"` + + // 3.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + ExternalRefComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go new file mode 100644 index 0000000000..827927aebd --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Relationship is a Relationship section of an SPDX Document for +// version 2.1 of the spec. 
+type Relationship struct { + + // 7.1: Relationship + // Cardinality: optional, one or more; one per Relationship + // one mandatory for SPDX Document with multiple packages + // RefA and RefB are first and second item + // Relationship is type from 7.1.1 + RefA common.DocElementID `json:"spdxElementId"` + RefB common.DocElementID `json:"relatedSpdxElement"` + Relationship string `json:"relationshipType"` + + // 7.2: Relationship Comment + // Cardinality: optional, one + RelationshipComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go new file mode 100644 index 0000000000..8d70d00e4b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +// Review is a Review section of an SPDX Document for version 2.1 of the spec. +// DEPRECATED in version 2.0 of spec; retained here for compatibility. +type Review struct { + + // DEPRECATED in version 2.0 of spec + // 9.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 9.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 9.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go new file mode 100644 index 0000000000..9b94fd8d81 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Snippet is a Snippet section of an SPDX Document for version 2.1 of the spec. 
+type Snippet struct { + + // 5.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 5.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 5.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetLicenseConcluded string `json:"licenseConcluded"` + + // 5.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 5.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 5.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 5.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 5.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go similarity index 94% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go index 35eddc617e..3d76d4b71f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // Annotation is an Annotation section of an SPDX Document for version 2.2 of the spec. type Annotation struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go similarity index 84% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go index 70e611f79b..39082e7acf 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go @@ -2,14 +2,16 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // CreationInfo is a Document Creation Information section of an // SPDX Document for version 2.2 of the spec. type CreationInfo struct { // 6.7: License List Version // Cardinality: optional, one - LicenseListVersion string `json:"licenseListVersion"` + LicenseListVersion string `json:"licenseListVersion,omitempty"` // 6.8: Creators: may have multiple keys for Person, Organization // and/or Tool diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go new file mode 100644 index 0000000000..d94f5b066c --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go @@ -0,0 +1,150 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. 
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_2 + +import ( + "encoding/json" + "fmt" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.2" +const DataLicense = "CC0-1.0" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.2 of the spec. +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.2 of the spec. +// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-"` +} + +func (d *Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) + +func (d *Document) UnmarshalJSON(b []byte) error { + type doc Document + type extras struct { + DocumentDescribes []common.DocElementID `json:"documentDescribes"` + } + + var d2 doc + if err := json.Unmarshal(b, &d2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *d = Document(d2) + + relationshipExists := map[string]bool{} + serializeRel := func(r *Relationship) string { + return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(r.RefA), r.Relationship, common.RenderDocElementID(r.RefB)) + } + + // index current list of relationships to ensure no duplication + for _, r := range d.Relationships { + relationshipExists[serializeRel(r)] = true + } + + // build relationships for documentDescribes field + for _, id := range e.DocumentDescribes { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: d.SPDXIdentifier, + }, + RefB: id, + Relationship: common.TypeRelationshipDescribe, + } + + if 
!relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + // build relationships for package hasFiles field + for _, p := range d.Packages { + for _, f := range p.hasFiles { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: p.PackageSPDXIdentifier, + }, + RefB: f, + Relationship: common.TypeRelationshipContains, + } + if !relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + p.hasFiles = nil + } + + return nil +} + +var _ json.Unmarshaler = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go similarity index 98% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go index 150e79f0bb..1433394901 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // File is a File section of an SPDX Document for version 2.2 of the spec. type File struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/other_license.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/other_license.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go similarity index 73% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go index 2d99e0456b..54de537c3f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go @@ -2,7 +2,12 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "encoding/json" + "strings" + + "github.com/spdx/tools-golang/spdx/v2/common" +) // Package is a Package section of an SPDX Document for version 2.2 of the spec. type Package struct { @@ -43,12 +48,12 @@ type Package struct { // 7.8: FilesAnalyzed // Cardinality: optional, one; default value is "true" if omitted - FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + FilesAnalyzed bool `json:"filesAnalyzed"` // NOT PART OF SPEC: did FilesAnalyzed tag appear? 
IsFilesAnalyzedTagPresent bool `json:"-"` // 7.9: Package Verification Code - PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode"` + PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` // 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512 and/or MD5 // Cardinality: optional, one or many @@ -101,7 +106,7 @@ type Package struct { // 7.22: Package External Reference Comment // Cardinality: conditional (optional, one) for each External Reference - // contained within PackageExternalReference2_1 struct, if present + // contained within PackageExternalReference struct, if present // 7.23: Package Attribution Text // Cardinality: optional, one or many @@ -111,8 +116,44 @@ type Package struct { Files []*File `json:"files,omitempty"` Annotations []Annotation `json:"annotations,omitempty"` + + // this field is only used when decoding JSON to translate the hasFiles + // property to relationships + hasFiles []common.DocElementID } +func (p *Package) UnmarshalJSON(b []byte) error { + type pkg Package + type extras struct { + HasFiles []common.DocElementID `json:"hasFiles"` + FilesAnalyzed *bool `json:"filesAnalyzed"` + } + + var p2 pkg + if err := json.Unmarshal(b, &p2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *p = Package(p2) + + p.hasFiles = e.HasFiles + // FilesAnalyzed defaults to true if omitted + if e.FilesAnalyzed == nil { + p.FilesAnalyzed = true + } else { + p.IsFilesAnalyzedTagPresent = true + } + + return nil +} + +var _ json.Unmarshaler = (*Package)(nil) + // PackageExternalReference is an External Reference to additional info // about a Package, as defined in section 7.21 in version 2.2 of the spec. type PackageExternalReference struct { @@ -131,3 +172,32 @@ type PackageExternalReference struct { // Cardinality: conditional (optional, one) for each External Reference ExternalRefComment string `json:"comment,omitempty"` } + +var _ json.Unmarshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) UnmarshalJSON(b []byte) error { + type ref PackageExternalReference + var rr ref + if err := json.Unmarshal(b, &rr); err != nil { + return err + } + + *r = PackageExternalReference(rr) + r.Category = strings.ReplaceAll(r.Category, "_", "-") + + return nil +} + +var _ json.Marshaler = (*PackageExternalReference)(nil) + +// We output as the JSON type enums since in v2.2.0 the JSON schema +// spec only had enums with _ (e.g. PACKAGE_MANAGER) +func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { + type ref PackageExternalReference + var rr ref + + rr = ref(*r) + rr.Category = strings.ReplaceAll(rr.Category, "-", "_") + + return json.Marshal(&rr) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go similarity index 92% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go index a93baa714d..47df33378f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // Relationship is a Relationship section of an SPDX Document for // version 2.2 of the spec. 
diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/review.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/review.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go similarity index 96% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go index 61045f1e02..473c5a11cc 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // Snippet is a Snippet section of an SPDX Document for version 2.2 of the spec. type Snippet struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go similarity index 88% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go index 121e995235..338394cf60 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Annotation is an Annotation section of an SPDX Document for version 2.3 of the spec. +// Annotation is an Annotation section of an SPDX Document type Annotation struct { // 12.1: Annotator // Cardinality: conditional (mandatory, one) if there is an Annotation diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go similarity index 80% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go index 33b2caf070..84d5bf082e 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go @@ -2,14 +2,15 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// CreationInfo is a Document Creation Information section of an -// SPDX Document for version 2.3 of the spec. +// CreationInfo is a Document Creation Information section of an SPDX Document type CreationInfo struct { // 6.7: License List Version // Cardinality: optional, one - LicenseListVersion string `json:"licenseListVersion"` + LicenseListVersion string `json:"licenseListVersion,omitempty"` // 6.8: Creators: may have multiple keys for Person, Organization // and/or Tool diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go new file mode 100644 index 0000000000..279e976ccd --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go @@ -0,0 +1,150 @@ +// Package v2_3 Package contains the struct definition for an SPDX Document +// and its constituent parts. 
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_3 + +import ( + "encoding/json" + "fmt" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.3" +const DataLicense = "CC0-1.0" + +// ExternalDocumentRef is a reference to an external SPDX document as defined in section 6.6 +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document: +// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-" yaml:"-"` +} + +func (d *Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) + +func (d *Document) UnmarshalJSON(b []byte) error { + type doc Document + type extras struct { + DocumentDescribes []common.DocElementID `json:"documentDescribes"` + } + + var d2 doc + if err := json.Unmarshal(b, &d2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *d = Document(d2) + + relationshipExists := map[string]bool{} + serializeRel := func(r *Relationship) string { + return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(r.RefA), r.Relationship, common.RenderDocElementID(r.RefB)) + } + + // index current list of relationships to ensure no duplication + for _, r := range d.Relationships { + relationshipExists[serializeRel(r)] = true + } + + // build relationships for documentDescribes field + for _, id := range e.DocumentDescribes { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: d.SPDXIdentifier, + }, + RefB: id, + Relationship: common.TypeRelationshipDescribe, + } + + if !relationshipExists[serializeRel(r)] { + d.Relationships 
= append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + // build relationships for package hasFiles field + for _, p := range d.Packages { + for _, f := range p.hasFiles { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: p.PackageSPDXIdentifier, + }, + RefB: f, + Relationship: common.TypeRelationshipContains, + } + if !relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + p.hasFiles = nil + } + + return nil +} + +var _ json.Unmarshaler = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go similarity index 94% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go index c472fdb2fc..9f8f28acdb 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// File is a File section of an SPDX Document for version 2.3 of the spec. +// File is a File section of an SPDX Document type File struct { // 8.1: File Name // Cardinality: mandatory, one @@ -74,7 +76,7 @@ type File struct { } // ArtifactOfProject is a DEPRECATED collection of data regarding -// a Package, as defined in sections 8.9-8.11 in version 2.3 of the spec. +// a Package, as defined in sections 8.9-8.11. // NOTE: the JSON schema does not define the structure of this object: // https://github.com/spdx/spdx-spec/blob/development/v2.3.1/schemas/spdx-schema.json#L480 type ArtifactOfProject struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go similarity index 90% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go index 363bb41253..55971f42a7 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go @@ -2,8 +2,7 @@ package v2_3 -// OtherLicense is an Other License Information section of an -// SPDX Document for version 2.3 of the spec. +// OtherLicense is an Other License Information section of an SPDX Document type OtherLicense struct { // 10.1: License Identifier: "LicenseRef-[idstring]" // Cardinality: conditional (mandatory, one) if license is not diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go similarity index 75% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go index b9d5b9515b..0acadc27be 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go @@ -2,9 +2,14 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "encoding/json" + "strings" -// Package is a Package section of an SPDX Document for version 2.3 of the spec.
+ "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Package is a Package section of an SPDX Document type Package struct { // NOT PART OF SPEC // flag: does this "package" contain files that were in fact "unpackaged", @@ -43,7 +48,7 @@ type Package struct { // 7.8: FilesAnalyzed // Cardinality: optional, one; default value is "true" if omitted - FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + FilesAnalyzed bool `json:"filesAnalyzed"` // NOT PART OF SPEC: did FilesAnalyzed tag appear? IsFilesAnalyzedTagPresent bool `json:"-" yaml:"-"` @@ -81,8 +86,8 @@ type Package struct { PackageLicenseComments string `json:"licenseComments,omitempty"` // 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" - // Cardinality: mandatory, one - PackageCopyrightText string `json:"copyrightText"` + // Cardinality: optional, zero or one + PackageCopyrightText string `json:"copyrightText,omitempty"` // 7.18: Package Summary Description // Cardinality: optional, one @@ -102,7 +107,7 @@ type Package struct { // 7.22: Package External Reference Comment // Cardinality: conditional (optional, one) for each External Reference - // contained within PackageExternalReference2_1 struct, if present + // contained within PackageExternalReference struct, if present // 7.23: Package Attribution Text // Cardinality: optional, one or many @@ -129,10 +134,47 @@ type Package struct { Files []*File `json:"files,omitempty"` Annotations []Annotation `json:"annotations,omitempty"` + + // this field is only used when decoding JSON to translate the hasFiles + // property to relationships + hasFiles []common.DocElementID } +func (p *Package) UnmarshalJSON(b []byte) error { + type pkg Package + type extras struct { + HasFiles []common.DocElementID `json:"hasFiles"` + FilesAnalyzed *bool `json:"filesAnalyzed"` + } + + var p2 pkg + if err := json.Unmarshal(b, &p2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *p = Package(p2) + + p.hasFiles = e.HasFiles + + // FilesAnalyzed defaults to true if omitted + if e.FilesAnalyzed == nil { + p.FilesAnalyzed = true + } else { + p.IsFilesAnalyzedTagPresent = true + } + + return nil +} + +var _ json.Unmarshaler = (*Package)(nil) + // PackageExternalReference is an External Reference to additional info -// about a Package, as defined in section 7.21 in version 2.3 of the spec. 
+// about a Package, as defined in section 7.21 type PackageExternalReference struct { // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" Category string `json:"referenceCategory"` @@ -149,3 +191,31 @@ type PackageExternalReference struct { // Cardinality: conditional (optional, one) for each External Reference ExternalRefComment string `json:"comment,omitempty"` } + +var _ json.Unmarshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) UnmarshalJSON(b []byte) error { + type ref PackageExternalReference + var rr ref + if err := json.Unmarshal(b, &rr); err != nil { + return err + } + + rr.Category = strings.ReplaceAll(rr.Category, "_", "-") + + *r = PackageExternalReference(rr) + return nil +} + +var _ json.Marshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { + type ref PackageExternalReference + var rr ref + + rr = ref(*r) + + rr.Category = strings.ReplaceAll(rr.Category, "_", "-") + + return json.Marshal(&rr) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go similarity index 81% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go index af4c07d164..d5cd8d8ba1 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go @@ -2,10 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Relationship is a Relationship section of an SPDX Document for -// version 2.3 of the spec. +// Relationship is a Relationship section of an SPDX Document type Relationship struct { // 11.1: Relationship diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go similarity index 89% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go index 0463807fbd..cf1a1c71c5 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go @@ -2,7 +2,7 @@ package v2_3 -// Review is a Review section of an SPDX Document for version 2.3 of the spec. +// Review is a Review section of an SPDX Document. // DEPRECATED in version 2.0 of spec; retained here for compatibility. type Review struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go similarity index 92% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go index d55a1a968f..9c479d2323 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Snippet is a Snippet section of an SPDX Document for version 2.3 of the spec. 
+// Snippet is a Snippet section of an SPDX Document type Snippet struct { // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go deleted file mode 100644 index 32fdb8db84..0000000000 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package spdx contains the struct definition for an SPDX Document -// and its constituent parts. -// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package v2_3 - -import "github.com/spdx/tools-golang/spdx/common" - -// ExternalDocumentRef is a reference to an external SPDX document -// as defined in section 6.6 for version 2.3 of the spec. -type ExternalDocumentRef struct { - // DocumentRefID is the ID string defined in the start of the - // reference. It should _not_ contain the "DocumentRef-" part - // of the mandatory ID string. - DocumentRefID string `json:"externalDocumentId"` - - // URI is the URI defined for the external document - URI string `json:"spdxDocument"` - - // Checksum is the actual hash data - Checksum common.Checksum `json:"checksum"` -} - -// Document is an SPDX Document for version 2.3 of the spec. -// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information -type Document struct { - // 6.1: SPDX Version; should be in the format "SPDX-2.3" - // Cardinality: mandatory, one - SPDXVersion string `json:"spdxVersion"` - - // 6.2: Data License; should be "CC0-1.0" - // Cardinality: mandatory, one - DataLicense string `json:"dataLicense"` - - // 6.3: SPDX Identifier; should be "DOCUMENT" to represent - // mandatory identifier of SPDXRef-DOCUMENT - // Cardinality: mandatory, one - SPDXIdentifier common.ElementID `json:"SPDXID"` - - // 6.4: Document Name - // Cardinality: mandatory, one - DocumentName string `json:"name"` - - // 6.5: Document Namespace - // Cardinality: mandatory, one - DocumentNamespace string `json:"documentNamespace"` - - // 6.6: External Document References - // Cardinality: optional, one or many - ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` - - // 6.11: Document Comment - // Cardinality: optional, one - DocumentComment string `json:"comment,omitempty"` - - CreationInfo *CreationInfo `json:"creationInfo"` - Packages []*Package `json:"packages,omitempty"` - Files []*File `json:"files,omitempty"` - OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` - Relationships []*Relationship `json:"relationships,omitempty"` - Annotations []*Annotation `json:"annotations,omitempty"` - Snippets []Snippet `json:"snippets,omitempty"` - - // DEPRECATED in version 2.0 of spec - Reviews []*Review `json:"-" yaml:"-"` -} diff --git a/vendor/modules.txt b/vendor/modules.txt index efb2ec1278..85183f5eab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -87,6 +87,9 @@ github.com/RackSec/srslog # github.com/agext/levenshtein v1.2.3 ## explicit github.com/agext/levenshtein +# github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 +## explicit; go 1.18 +github.com/anchore/go-struct-converter # github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 ## explicit github.com/armon/circbuf @@ -363,11 +366,12 @@ github.com/containerd/go-cni # github.com/containerd/go-runc v1.1.0 ## explicit; go 1.18 github.com/containerd/go-runc -# github.com/containerd/nydus-snapshotter v0.3.1 -## explicit; go 1.17 +# github.com/containerd/nydus-snapshotter v0.8.2 +## 
explicit; go 1.19 github.com/containerd/nydus-snapshotter/pkg/converter github.com/containerd/nydus-snapshotter/pkg/converter/tool github.com/containerd/nydus-snapshotter/pkg/errdefs +github.com/containerd/nydus-snapshotter/pkg/label # github.com/containerd/stargz-snapshotter/estargz v0.14.3 ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz @@ -375,9 +379,6 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/ttrpc v1.2.2 ## explicit; go 1.13 github.com/containerd/ttrpc -# github.com/containerd/typeurl v1.0.2 -## explicit; go 1.13 -github.com/containerd/typeurl # github.com/containerd/typeurl/v2 v2.1.1 ## explicit; go 1.13 github.com/containerd/typeurl/v2 @@ -664,8 +665,8 @@ github.com/mitchellh/hashstructure/v2 # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 -## explicit; go 1.18 +# github.com/moby/buildkit v0.12.2 +## explicit; go 1.20 github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/types github.com/moby/buildkit/cache @@ -691,12 +692,15 @@ github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor github.com/moby/buildkit/executor/containerdexecutor github.com/moby/buildkit/executor/oci +github.com/moby/buildkit/executor/resources +github.com/moby/buildkit/executor/resources/types github.com/moby/buildkit/executor/runcexecutor github.com/moby/buildkit/exporter github.com/moby/buildkit/exporter/attestation github.com/moby/buildkit/exporter/containerimage github.com/moby/buildkit/exporter/containerimage/exptypes github.com/moby/buildkit/exporter/containerimage/image +github.com/moby/buildkit/exporter/exptypes github.com/moby/buildkit/exporter/local github.com/moby/buildkit/exporter/oci github.com/moby/buildkit/exporter/tar @@ -711,8 +715,10 @@ github.com/moby/buildkit/frontend/dockerfile/dockerignore github.com/moby/buildkit/frontend/dockerfile/instructions github.com/moby/buildkit/frontend/dockerfile/parser github.com/moby/buildkit/frontend/dockerfile/shell +github.com/moby/buildkit/frontend/dockerui github.com/moby/buildkit/frontend/gateway github.com/moby/buildkit/frontend/gateway/client +github.com/moby/buildkit/frontend/gateway/container github.com/moby/buildkit/frontend/gateway/forwarder github.com/moby/buildkit/frontend/gateway/grpcclient github.com/moby/buildkit/frontend/gateway/pb @@ -760,8 +766,6 @@ github.com/moby/buildkit/util/appdefaults github.com/moby/buildkit/util/archutil github.com/moby/buildkit/util/attestation github.com/moby/buildkit/util/bklog -github.com/moby/buildkit/util/buildinfo -github.com/moby/buildkit/util/buildinfo/types github.com/moby/buildkit/util/compression github.com/moby/buildkit/util/cond github.com/moby/buildkit/util/contentutil @@ -995,12 +999,16 @@ github.com/shibumi/go-pathspec # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f +# github.com/spdx/tools-golang v0.5.1 ## explicit; go 1.13 +github.com/spdx/tools-golang/convert github.com/spdx/tools-golang/json +github.com/spdx/tools-golang/spdx github.com/spdx/tools-golang/spdx/common -github.com/spdx/tools-golang/spdx/v2_2 -github.com/spdx/tools-golang/spdx/v2_3 +github.com/spdx/tools-golang/spdx/v2/common +github.com/spdx/tools-golang/spdx/v2/v2_1 +github.com/spdx/tools-golang/spdx/v2/v2_2 +github.com/spdx/tools-golang/spdx/v2/v2_3 # github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 
github.com/spf13/cobra From 2246297ae621787f73f924226dd2420bd64d8505 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 13 Jul 2023 16:13:19 -0600 Subject: [PATCH 05/11] builder-next: adopt new wrapped types With BuildKit 0.12, some existing types are now required to be wrapped by new types: * containerd's LeaseManager and ContentStore have to be a (namespace-aware) BuildKit type since https://github.com/moby/buildkit/commit/f044e0a9468639559db93fe30ee826ce502ac481 * BuildKit's solver.CacheManager is used instead of bboltstorage.CacheKeyStorage since https://github.com/moby/buildkit/commit/2b30693409271c579d55f9a1e256926de89c9edf * The MaxAge config field is a bkconfig.Duration since https://github.com/moby/buildkit/commit/e06c96274ffe3bd777c202f121af151b36a9f57d Signed-off-by: Bjorn Neergaard --- .../builder-next/adapters/snapshot/snapshot.go | 8 +++++--- builder/builder-next/controller.go | 16 +++++++--------- builder/builder-next/worker/worker.go | 15 ++++++++------- daemon/config/builder.go | 5 +++-- 4 files changed, 23 insertions(+), 21 deletions(-) diff --git a/builder/builder-next/adapters/snapshot/snapshot.go b/builder/builder-next/adapters/snapshot/snapshot.go index 8c5f803885..a559e15b4b 100644 --- a/builder/builder-next/adapters/snapshot/snapshot.go +++ b/builder/builder-next/adapters/snapshot/snapshot.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/leaseutil" "github.com/opencontainers/go-digest" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" @@ -57,7 +58,7 @@ type snapshotter struct { } // NewSnapshotter creates a new snapshotter -func NewSnapshotter(opt Opt, prevLM leases.Manager) (snapshot.Snapshotter, leases.Manager, error) { +func NewSnapshotter(opt Opt, prevLM leases.Manager, ns string) (snapshot.Snapshotter, *leaseutil.Manager, error) { dbPath := filepath.Join(opt.Root, "snapshots.db") db, err := bolt.Open(dbPath, 0o600, nil) if err != nil { @@ -76,7 +77,8 @@ func NewSnapshotter(opt Opt, prevLM leases.Manager) (snapshot.Snapshotter, lease reg: reg, } - lm := newLeaseManager(s, prevLM) + slm := newLeaseManager(s, prevLM) + lm := leaseutil.WithNamespace(slm, ns) ll, err := lm.List(context.TODO()) if err != nil { @@ -89,7 +91,7 @@ func NewSnapshotter(opt Opt, prevLM leases.Manager) (snapshot.Snapshotter, lease } for _, r := range rr { if r.Type == "snapshots/default" { - lm.addRef(l.ID, r.ID) + slm.addRef(l.ID, r.ID) } } } diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index b9805f7ebd..59981ef239 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -39,10 +39,10 @@ import ( "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/frontend/gateway/forwarder" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/bboltcachestorage" "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/entitlements" - "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network/netproviders" "github.com/moby/buildkit/util/tracing/detect" "github.com/moby/buildkit/worker" @@ -135,7 +135,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt SessionManager: opt.SessionManager, WorkerController: wc, Frontends: frontends, - CacheKeyStorage: cacheStorage, + CacheManager: solver.NewCacheManager(ctx, "local", cacheStorage, 
worker.NewCacheResultStorage(wc)), ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{ "gha": gha.ResolveCacheImporterFunc(), "local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager), @@ -202,7 +202,7 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt return nil, errors.Errorf("could not access graphdriver") } - store, err := local.NewStore(filepath.Join(root, "content")) + innerStore, err := local.NewStore(filepath.Join(root, "content")) if err != nil { return nil, err } @@ -212,18 +212,16 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt return nil, errors.WithStack(err) } - mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{}) + mdb := ctdmetadata.NewDB(db, innerStore, map[string]snapshots.Snapshotter{}) - store = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit") - - lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit") + store := containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit") snapshotter, lm, err := snapshot.NewSnapshotter(snapshot.Opt{ GraphDriver: driver, LayerStore: dist.LayerStore, Root: root, IdentityMapping: opt.IdentityMapping, - }, lm) + }, ctdmetadata.NewLeaseManager(mdb), "buildkit") if err != nil { return nil, err } @@ -358,7 +356,7 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt SessionManager: opt.SessionManager, WorkerController: wc, Frontends: frontends, - CacheKeyStorage: cacheStorage, + CacheManager: solver.NewCacheManager(ctx, "local", cacheStorage, worker.NewCacheResultStorage(wc)), ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{ "registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.RegistryHosts, store, dist.ReferenceStore, dist.ImageStore), "local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager), diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index 277c7a371a..a3e17b1ae0 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -9,7 +9,6 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/leases" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" @@ -32,6 +31,7 @@ import ( "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/llbsolver/ops" @@ -42,6 +42,7 @@ import ( "github.com/moby/buildkit/source/local" "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/version" "github.com/opencontainers/go-digest" @@ -71,9 +72,9 @@ type Opt struct { GCPolicy []client.PruneInfo Executor executor.Executor Snapshotter snapshot.Snapshotter - ContentStore content.Store + ContentStore *containerdsnapshot.Store CacheManager cache.Manager - LeaseManager leases.Manager + LeaseManager *leaseutil.Manager ImageSource *containerimage.Source DownloadManager *xfer.LayerDownloadManager V2MetadataService distmetadata.V2MetadataService @@ -186,13 +187,13 @@ func (w *Worker) Close() error { return nil } 
-// ContentStore returns content store -func (w *Worker) ContentStore() content.Store { +// ContentStore returns the wrapped content store +func (w *Worker) ContentStore() *containerdsnapshot.Store { return w.Opt.ContentStore } -// LeaseManager returns leases.Manager for the worker -func (w *Worker) LeaseManager() leases.Manager { +// LeaseManager returns the wrapped lease manager +func (w *Worker) LeaseManager() *leaseutil.Manager { return w.Opt.LeaseManager } diff --git a/daemon/config/builder.go b/daemon/config/builder.go index 52be1b314c..8801ba20cb 100644 --- a/daemon/config/builder.go +++ b/daemon/config/builder.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/docker/docker/api/types/filters" + bkconfig "github.com/moby/buildkit/cmd/buildkitd/config" ) // BuilderGCRule represents a GC rule for buildkit cache @@ -62,8 +63,8 @@ type BuilderGCConfig struct { // BuilderHistoryConfig contains history config for a buildkit builder type BuilderHistoryConfig struct { - MaxAge int64 `json:",omitempty"` - MaxEntries int64 `json:",omitempty"` + MaxAge bkconfig.Duration `json:",omitempty"` + MaxEntries int64 `json:",omitempty"` } // BuilderEntitlements contains settings to enable/disable entitlements From 221b73e23d09c32c20506df2eabd64a2e0606455 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 13 Jul 2023 15:19:17 -0600 Subject: [PATCH 06/11] builder-next: apply source policies on config resolve SourcePolicy was accounted for in https://github.com/moby/buildkit/commit/330cf7ae7d9ac983432ab13f8bc9e7d4aff75695 TODO: replace applySourcePolicies with BuildKit's implementation, which is currently unexported. Co-authored-by: Tonis Tiigi Signed-off-by: Bjorn Neergaard --- .../adapters/containerimage/pull.go | 74 ++++++++++++++++--- builder/builder-next/worker/worker.go | 2 +- 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index 55394948ea..f819394e8c 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "path" + "strings" "sync" "time" @@ -16,6 +17,7 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" + cdreference "github.com/containerd/containerd/reference" ctdreference "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" @@ -32,8 +34,12 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/sourcepolicy" + policy "github.com/moby/buildkit/sourcepolicy/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/leaseutil" @@ -92,8 +98,9 @@ func (is *Source) resolveLocal(refStr string) (*image.Image, error) { return img, nil } -func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { +func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { type t struct { + 
ref string dgst digest.Digest dt []byte } @@ -105,32 +112,36 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp key := "getconfig::" + ref + "::" + platforms.Format(p) res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g) - dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform) + ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{}) if err != nil { return nil, err } - return &t{dgst: dgst, dt: dt}, nil + return &t{ref: ref, dgst: dgst, dt: dt}, nil }) var typed *t if err != nil { - return "", nil, err + return ref, "", nil, err } typed = res.(*t) - return typed.dgst, typed.dt, nil + return typed.ref, typed.dgst, typed.dt, nil } // ResolveImageConfig returns image config for an image -func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { +func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { + ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies) + if err != nil { + return "", "", nil, err + } resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode) if err != nil { - return "", nil, err + return ref, "", nil, err } switch resolveMode { case source.ResolveModeForcePull: - dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g) + ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g) // TODO: pull should fallback to local in case of failure to allow offline behavior // the fallback doesn't work currently - return dgst, dt, err + return ref, dgst, dt, err /* if err == nil { return dgst, dt, err @@ -152,14 +163,14 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re path.Join(img.OS, img.Architecture, img.Variant), ) } else { - return "", img.RawJSON(), err + return ref, "", img.RawJSON(), err } } // fallback to remote return is.resolveRemote(ctx, ref, opt.Platform, sm, g) } // should never happen - return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode) + return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode) } // Resolve returns access to pulling for an identifier @@ -289,11 +300,12 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { if err != nil { return struct{}{}, err } - _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) + newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) if err != nil { return struct{}{}, err } + p.ref = newRef p.config = dt } return struct{}{}, nil @@ -860,3 +872,41 @@ func platformMatches(img *image.Image, p *ocispec.Platform) bool { Variant: img.Variant, }) } + +func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (string, error) { + ref, err := cdreference.Parse(str) + if err != nil { + return "", errors.WithStack(err) + } + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: srctypes.DockerImageScheme + "://" + 
ref.String(), + }, + }, + } + + mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op) + if err != nil { + return "", errors.Wrap(err, "could not resolve image due to policy") + } + + if mut { + var ( + t string + ok bool + ) + t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://") + if !ok { + return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier()) + } + if ok && t != srctypes.DockerImageScheme { + return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef} + } + ref, err = cdreference.Parse(newRef) + if err != nil { + return "", errors.WithStack(err) + } + } + return ref.String(), nil +} diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index a3e17b1ae0..07081fa893 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -236,7 +236,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se } // ResolveImageConfig returns image config for an image -func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g) } From a728bd0ee93010ec2415f96a343ffcb6ca70ad5d Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 13 Jul 2023 16:49:15 -0600 Subject: [PATCH 07/11] builder-next/pull: use ResolveMode.String() helper Introduced years ago in https://github.com/moby/buildkit/commit/6644e1b0dab8cae614207083ec1ba523a495fb44 Signed-off-by: Bjorn Neergaard --- .../builder-next/adapters/containerimage/pull.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index f819394e8c..d516b49385 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -300,7 +300,7 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { if err != nil { return struct{}{}, err } - newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) + newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g) if err != nil { return struct{}{}, err } @@ -849,20 +849,6 @@ func cacheKeyFromConfig(dt []byte) digest.Digest { return identity.ChainID(img.RootFS.DiffIDs) } -// resolveModeToString is the equivalent of github.com/moby/buildkit/solver/llb.ResolveMode.String() -// FIXME: add String method on source.ResolveMode -func resolveModeToString(rm source.ResolveMode) string { - switch rm { - case source.ResolveModeDefault: - return "default" - case source.ResolveModeForcePull: - return "pull" - case source.ResolveModePreferLocal: - return "local" - } - return "" -} - func platformMatches(img *image.Image, p *ocispec.Platform) bool { return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{ Architecture: img.Architecture, From c8773e10b1750c63c6af52f487046022409a60f8 Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 13 Jul 2023 16:53:30 -0600 Subject: [PATCH 08/11] 
builder-next/wrapper: use new exptypes constants Introduced in https://github.com/moby/buildkit/commit/4fc2d7b5e7ffa51235a30f55432d1f0eb619f77c Signed-off-by: Bjorn Neergaard --- .../exporter/overrides/wrapper.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/builder/builder-next/exporter/overrides/wrapper.go b/builder/builder-next/exporter/overrides/wrapper.go index 59b3c9523d..220a042df1 100644 --- a/builder/builder-next/exporter/overrides/wrapper.go +++ b/builder/builder-next/exporter/overrides/wrapper.go @@ -5,14 +5,7 @@ import ( "strings" "github.com/moby/buildkit/exporter" -) - -// TODO(vvoland): Use buildkit consts once they're public -// https://github.com/moby/buildkit/pull/3694 -const ( - keyImageName = "name" - keyUnpack = "unpack" - keyDanglingPrefix = "dangling-name-prefix" + "github.com/moby/buildkit/exporter/containerimage/exptypes" ) // Wraps the containerimage exporter's Resolve method to apply moby-specific @@ -30,14 +23,14 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs ma if exporterAttrs == nil { exporterAttrs = make(map[string]string) } - reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs[keyImageName], ",")) + reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs[string(exptypes.OptKeyName)], ",")) if err != nil { return nil, err } - exporterAttrs[keyImageName] = strings.Join(reposAndTags, ",") - exporterAttrs[keyUnpack] = "true" - if _, has := exporterAttrs[keyDanglingPrefix]; !has { - exporterAttrs[keyDanglingPrefix] = "moby-dangling" + exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",") + exporterAttrs[string(exptypes.OptKeyUnpack)] = "true" + if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has { + exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling" } return e.exp.Resolve(ctx, exporterAttrs) From 34e71b374bad330cf82ad2ada0acbbf1b5995c5a Mon Sep 17 00:00:00 2001 From: Bjorn Neergaard Date: Thu, 21 Sep 2023 12:55:36 -0600 Subject: [PATCH 09/11] builder-next/a/ci/pull: strongly type flightcontrol.Group Now that this is a generic, we can define a struct type at the package level, and remove the casting logic necessary when we had to use interface{}. 
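To illustrate the pattern (a minimal, self-contained sketch with hypothetical names — fetchConfig and configResult stand in for the adapter's resolveRemoteResult flow): instantiating flightcontrol.Group with a concrete type parameter lets Do return that type directly, so the interface{} type assertion at the call site disappears.

    package main

    import (
        "context"
        "fmt"

        "github.com/moby/buildkit/util/flightcontrol"
    )

    // configResult is a stand-in for the package-level result struct
    // described above (resolveRemoteResult in the real adapter).
    type configResult struct {
        ref string
        dt  []byte
    }

    // The zero value of Group is ready to use; typing it with *configResult
    // means Do returns *configResult instead of interface{}.
    var g flightcontrol.Group[*configResult]

    func fetchConfig(ctx context.Context, ref string) (*configResult, error) {
        // Do deduplicates concurrent calls that share the same key, so
        // parallel resolutions of the same ref perform the work only once.
        return g.Do(ctx, "getconfig::"+ref, func(ctx context.Context) (*configResult, error) {
            return &configResult{ref: ref, dt: []byte("raw image config")}, nil
        })
    }

    func main() {
        res, err := fetchConfig(context.Background(), "docker.io/library/busybox:latest")
        fmt.Println(res, err)
    }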
Signed-off-by: Bjorn Neergaard --- .../adapters/containerimage/pull.go | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index d516b49385..705155dfe7 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -69,7 +69,7 @@ type SourceOpt struct { // Source is the source implementation for accessing container images type Source struct { SourceOpt - g flightcontrol.Group[interface{}] + g flightcontrol.Group[*resolveRemoteResult] } // NewSource creates a new image source @@ -98,32 +98,31 @@ func (is *Source) resolveLocal(refStr string) (*image.Image, error) { return img, nil } +type resolveRemoteResult struct { + ref string + dgst digest.Digest + dt []byte +} + func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { - type t struct { - ref string - dgst digest.Digest - dt []byte - } p := platforms.DefaultSpec() if platform != nil { p = *platform } // key is used to synchronize resolutions that can happen in parallel when doing multi-stage. key := "getconfig::" + ref + "::" + platforms.Format(p) - res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { + res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) { res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g) ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{}) if err != nil { return nil, err } - return &t{ref: ref, dgst: dgst, dt: dt}, nil + return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil }) - var typed *t if err != nil { return ref, "", nil, err } - typed = res.(*t) - return typed.ref, typed.dgst, typed.dt, nil + return res.ref, res.dgst, res.dt, nil } // ResolveImageConfig returns image config for an image From 286704bf6fb1fa3988aef0dd8a53271b8ce1c824 Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Tue, 1 Aug 2023 13:40:34 +0200 Subject: [PATCH 10/11] ci(buildkit): expose github runtime for gha tests This exposes `ACTIONS_RUNTIME_TOKEN` and `ACTIONS_CACHE_URL`, which are used to skip cache exporter tests, when combined with https://github.com/moby/buildkit/commit/a8789cbd4a657836d1b747a9c4eb54fed4e221bd Co-authored-by: Bjorn Neergaard Signed-off-by: CrazyMax Signed-off-by: Bjorn Neergaard --- .github/workflows/buildkit.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/buildkit.yml b/.github/workflows/buildkit.yml index 036afd2af7..6786626fb4 100644 --- a/.github/workflows/buildkit.yml +++ b/.github/workflows/buildkit.yml @@ -73,6 +73,12 @@ jobs: disabledFeatures="${disabledFeatures},merge_diff" fi echo "BUILDKIT_TEST_DISABLE_FEATURES=${disabledFeatures}" >> $GITHUB_ENV + # Expose `ACTIONS_RUNTIME_TOKEN` and `ACTIONS_CACHE_URL`, which is used + # in BuildKit's test suite to skip/unskip cache exporters: + # https://github.com/moby/buildkit/blob/567a99433ca23402d5e9b9f9124005d2e59b8861/client/client_test.go#L5407-L5411 + - + name: Expose GitHub Runtime + uses: crazy-max/ghaction-github-runtime@v2 - name: Checkout uses: actions/checkout@v3 From d5b067e04a2d30dee1d83b5caeefc595035a1ace Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Mon, 24 Jul 2023 00:24:56 +0200 Subject: [PATCH 11/11] ci(buildkit): remove regex skipping tests with 
digest inconsistency Skipping digest-related tests is no longer necessary after https://github.com/containerd/containerd/commit/40658316526ee5e0ec91d110e5999abb0b9a5407 Signed-off-by: CrazyMax Signed-off-by: Bjorn Neergaard --- .github/workflows/buildkit.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/buildkit.yml b/.github/workflows/buildkit.yml index 6786626fb4..7321001b27 100644 --- a/.github/workflows/buildkit.yml +++ b/.github/workflows/buildkit.yml @@ -124,6 +124,5 @@ jobs: TEST_DOCKERD: "1" TEST_DOCKERD_BINARY: "./build/moby/dockerd" TESTPKGS: "./${{ matrix.pkg }}" - # Skip buildkit tests checking the digest (see https://github.com/moby/buildkit/pull/3736) - TESTFLAGS: "-v --parallel=1 --timeout=30m --run=/^Test([^R]|.[^e]|..[^p]|...[^r]|....[^o]|.....[^S])/worker=${{ matrix.worker }}$" + TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$" working-directory: buildkit
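For context on the filter this last patch deletes: go test -run splits the pattern on unbracketed "/" and applies each element to the corresponding subtest level, and Go's RE2-based regexp has no negative lookahead, so the per-position negated character classes were emulating a "does not start with" match. A subtest name was selected if it differed from "TestReproS" at any of the first six characters after "Test"; only names with that prefix (such as BuildKit's TestReproSourceDateEpoch digest/reproducibility test) fell through every alternative and were skipped. The trailing worker=...$ element, which selects the worker variant, is kept in the new flags. A small standalone check of the regex mechanics (illustrative only, not part of the patch):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // The second path element of the removed -run filter: a name is
        // selected if any one of the six alternatives matches, i.e. if it
        // differs from "TestReproS" at any of those positions.
        re := regexp.MustCompile(`^Test([^R]|.[^e]|..[^p]|...[^r]|....[^o]|.....[^S])`)

        for _, name := range []string{"TestIntegration", "TestBuild", "TestReproSourceDateEpoch"} {
            fmt.Printf("%s selected: %v\n", name, re.MatchString(name))
        }
        // Prints:
        // TestIntegration selected: true
        // TestBuild selected: true
        // TestReproSourceDateEpoch selected: false
    }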