Merge pull request #43357 from corhere/vendor-swarmkit-and-containerd-v1.6.1

Vendor latest swarmkit, containerd v1.6.1

Commit a583434ebc — 872 changed files with 122,238 additions and 43,733 deletions.
@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
     pluginBackend: p,
     imageBackend:  i,
     volumeBackend: v,
-    dependencies:  agent.NewDependencyManager(),
+    dependencies:  agent.NewDependencyManager(b.PluginGetter()),
   }
 }

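The swarmkit bump changes the signature of agent.NewDependencyManager: the executor now hands it the daemon's plugin getter, presumably so dependency resolution can consult installed (for example CSI) plugins rather than only the built-in stores. Below is a minimal, self-contained sketch of that constructor-injection pattern; every type in it is an illustrative stand-in, not the real swarmkit or Moby API.

```go
package main

import "fmt"

// PluginGetter stands in for the getter a backend exposes so that
// dependency resolution can ask about installed (e.g. CSI) plugins.
type PluginGetter interface {
	Get(name string) (string, error)
}

// DependencyManager receives the getter at construction time; a nil
// getter simply means "no plugin lookups".
type DependencyManager struct {
	plugins PluginGetter
}

// NewDependencyManager mirrors the constructor-injection shape used above.
func NewDependencyManager(pg PluginGetter) *DependencyManager {
	return &DependencyManager{plugins: pg}
}

// staticGetter is a toy backend that knows about a fixed set of plugins.
type staticGetter map[string]string

func (s staticGetter) Get(name string) (string, error) {
	v, ok := s[name]
	if !ok {
		return "", fmt.Errorf("plugin %q not found", name)
	}
	return v, nil
}

func main() {
	// Daemon-style construction: thread the backend's plugin getter through.
	dm := NewDependencyManager(staticGetter{"my-csi-driver": "enabled"})
	state, err := dm.plugins.Get("my-csi-driver")
	fmt.Println(state, err)

	// Test-style construction: no plugin getter is needed.
	_ = NewDependencyManager(nil)
}
```

Passing nil simply disables plugin lookups, which is what the unit test change further down relies on.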
@@ -65,6 +65,7 @@ import (
 	"golang.org/x/sync/singleflight"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials/insecure"
 )

 // ContainersNamespace is the name of the namespace used for users containers

@@ -885,7 +886,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		// It is not harm to add WithBlock for containerd connection.
 		grpc.WithBlock(),

-		grpc.WithInsecure(),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
 		grpc.WithConnectParams(connParams),
 		grpc.WithContextDialer(dialer.ContextDialer),

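grpc.WithInsecure() is deprecated in the grpc-go release vendored here (v1.43.0); the non-deprecated spelling is an explicit insecure transport credential from google.golang.org/grpc/credentials/insecure, which is what the new import above provides. A small stand-alone sketch of the same dial pattern, with an illustrative containerd socket address:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// grpc.WithInsecure() is deprecated; an explicit insecure credential
	// expresses the same "plaintext connection" intent.
	conn, err := grpc.DialContext(ctx,
		"unix:///run/containerd/containerd.sock", // illustrative target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(), // as in the daemon, wait until the connection is ready
	)
	if err != nil {
		log.Fatalf("dial containerd: %v", err)
	}
	defer conn.Close()
	log.Println("connected to", conn.Target())
}
```

Functionally nothing changes — the connection is still plaintext — but the credentials object makes that choice explicit at the call site.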
@@ -179,7 +179,7 @@ func TestSetWindowsCredentialSpecInSpec(t *testing.T) {
 	t.Run("happy path with a 'config://' option", func(t *testing.T) {
 		configID := "my-cred-spec"

-		dependencyManager := swarmagent.NewDependencyManager()
+		dependencyManager := swarmagent.NewDependencyManager(nil)
 		dependencyManager.Configs().Add(swarmapi.Config{
 			ID: configID,
 			Spec: swarmapi.ConfigSpec{

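Because this test only exercises the in-memory config store, a nil plugin getter is sufficient. A compressed sketch of that usage (the package name and config payload are illustrative; the constructor and Add call are the ones shown in the hunk):

```go
package credentialspec_test // illustrative package name

import (
	"testing"

	swarmagent "github.com/docker/swarmkit/agent"
	swarmapi "github.com/docker/swarmkit/api"
)

func TestConfigStoreWithNilPluginGetter(t *testing.T) {
	// No plugin lookups happen here, so nil is fine.
	dm := swarmagent.NewDependencyManager(nil)
	dm.Configs().Add(swarmapi.Config{
		ID:   "my-cred-spec",
		Spec: swarmapi.ConfigSpec{Data: []byte(`{"example": true}`)},
	})
}
```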
vendor.mod — 39 changed lines
@@ -18,7 +18,7 @@ require (
 	github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549
 	github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5
 	github.com/containerd/cgroups v1.0.3
-	github.com/containerd/containerd v1.5.10
+	github.com/containerd/containerd v1.6.1
 	github.com/containerd/continuity v0.2.2
 	github.com/containerd/fifo v1.0.0
 	github.com/containerd/typeurl v1.0.2

@@ -33,7 +33,7 @@ require (
 	github.com/docker/go-units v0.4.0
 	github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
-	github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fluent/fluent-logger-golang v1.9.0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/godbus/dbus/v5 v5.0.6

@@ -74,32 +74,32 @@ require (
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274
 	github.com/vbatts/tar-split v0.11.2
-	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
+	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
 	go.etcd.io/bbolt v1.3.6
-	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
+	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
 	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
-	google.golang.org/grpc v1.40.0
+	google.golang.org/grpc v1.43.0
 	gotest.tools/v3 v3.1.0
 )

 require (
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/akutz/memconn v0.1.0 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
 	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
 	github.com/coreos/etcd v3.3.27+incompatible // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
 	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
 	github.com/gofrs/flock v0.7.3 // indirect
 	github.com/gogo/googleapis v1.4.0 // indirect

@@ -127,13 +127,21 @@ require (
 	github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.10.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/prometheus/common v0.30.0 // indirect
+	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rexray/gocsi v1.2.2 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/tinylib/msgp v1.1.0 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
 	go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
 	go.etcd.io/etcd/server/v3 v3.5.2 // indirect
 	go.opencensus.io v0.23.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
 	go.uber.org/zap v1.17.0 // indirect
 	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect

@@ -143,19 +151,16 @@ require (
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/api v0.46.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
 )

 replace (
 	cloud.google.com/go => cloud.google.com/go v0.59.0
 	cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.0.0
 	cloud.google.com/go/logging => cloud.google.com/go/logging v1.0.1-0.20190813144457-ceeb313ad77b
 	github.com/armon/go-metrics => github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9
 	github.com/armon/go-radix => github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
 	github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
 	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
 	github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
 	github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
 	github.com/hashicorp/go-msgpack => github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67
 	github.com/hashicorp/go-multierror => github.com/hashicorp/go-multierror v1.0.0

@@ -165,10 +170,10 @@ replace (
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.11
 	github.com/vishvananda/netlink => github.com/vishvananda/netlink v1.1.0
 	go.opencensus.io => go.opencensus.io v0.22.3
 	google.golang.org/api => google.golang.org/api v0.8.0
 	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
 	google.golang.org/grpc => google.golang.org/grpc v1.27.1
 )

 // Removes etcd dependency
 replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre

 // autogen/winresources/dockerd is generated a build time, this replacement is only for the purpose of `go mod vendor`
 replace github.com/docker/docker/autogen/winresources/dockerd => ./hack/make/.resources-windows

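These version bumps end up compiled into the daemon binary. One way to confirm which containerd and swarmkit revisions a given build actually vendored is Go's embedded module metadata — a generic sketch, not part of this change:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary built without module information")
		return
	}
	for _, dep := range info.Deps {
		// Report only the two modules bumped by this PR.
		switch dep.Path {
		case "github.com/containerd/containerd", "github.com/docker/swarmkit":
			fmt.Println(dep.Path, dep.Version)
		}
	}
}
```

Running `go version -m` against the binary reports the same dependency list without writing any code.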
vendor.sum — 557 changed lines (file diff suppressed because it is too large)
vendor/cloud.google.com/go/.gitignore — 1 changed line (generated, vendored)

@@ -2,6 +2,7 @@
 .idea
 .vscode
 *.swp
+.history

 # Test files
 *.test

442
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
442
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
|
@ -1,5 +1,445 @@
|
|||
# Changes
|
||||
|
||||
|
||||
## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
|
||||
* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4))
|
||||
* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
|
||||
* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
|
||||
* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678))
|
||||
|
||||
## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* **all:** This is a breaking change in dialogflow
|
||||
|
||||
### Features
|
||||
|
||||
* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
|
||||
* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
|
||||
* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
|
||||
* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
|
||||
* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634)
|
||||
* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61))
|
||||
* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf))
|
||||
* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57))
|
||||
* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f))
|
||||
* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
|
||||
* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e))
|
||||
* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
|
||||
* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
|
||||
* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee))
|
||||
|
||||
|
||||
### Miscellaneous Chores
|
||||
|
||||
* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
|
||||
|
||||
## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f))
|
||||
* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
|
||||
* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
|
||||
* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441))
|
||||
* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
|
||||
* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
|
||||
* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25))
|
||||
* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e))
|
||||
* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548))
|
||||
* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
|
||||
* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
|
||||
* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650))
|
||||
* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4))
|
||||
* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
|
||||
* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
|
||||
* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495))
|
||||
* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80))
|
||||
* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef))
|
||||
* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c))
|
||||
* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e))
|
||||
|
||||
## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
|
||||
* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
|
||||
* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
|
||||
* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
|
||||
* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
|
||||
* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
|
||||
* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
|
||||
* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
|
||||
|
||||
## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04))
|
||||
* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
|
||||
* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2))
|
||||
* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418))
|
||||
* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
|
||||
* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f))
|
||||
* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
* **errorreporting:** Update bazel builds for ER client libraries. ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
|
||||
* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd))
|
||||
* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
|
||||
* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
|
||||
* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363))
|
||||
* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
|
||||
|
||||
## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
|
||||
* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db))
|
||||
* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d))
|
||||
* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
|
||||
* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e))
|
||||
* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
|
||||
* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
|
||||
* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
|
||||
* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **appengine:** start generating apiv1 ([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7))
|
||||
* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3))
|
||||
* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
|
||||
* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf))
|
||||
* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5))
|
||||
* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
|
||||
* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
|
||||
* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
|
||||
* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102))
|
||||
* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149))
|
||||
* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b))
|
||||
* **internal/gapicgen:** change commit formatting to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e))
|
||||
* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af))
|
||||
* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279))
|
||||
* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359))
|
||||
* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f))
|
||||
* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531))
|
||||
* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
|
||||
* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8))
|
||||
* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
|
||||
* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60))
|
||||
* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
|
||||
* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
|
||||
* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a))
|
||||
* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
|
||||
* **internal/godocfx:** add TOC element for module root package ([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a))
|
||||
* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71))
|
||||
|
||||
## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448)
|
||||
* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a))
|
||||
|
||||
## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374)
|
||||
* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84))
|
||||
* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5))
|
||||
|
||||
## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) [#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199)
|
||||
* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff))
|
||||
* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be))
|
||||
* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999))
|
||||
* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0))
|
||||
|
||||
## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44))
|
||||
* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a))
|
||||
|
||||
|
||||
## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044)
|
||||
* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3))
|
||||
* **internal:** auto-run godocfx on new mods ([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42))
|
||||
* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc))
|
||||
* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171))
|
||||
* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6))
|
||||
* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2))
|
||||
* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb))
|
||||
* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7))
|
||||
|
||||
## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025)
|
||||
* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44))
|
||||
* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714))
|
||||
|
||||
## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14)
|
||||
|
||||
This is an empty release that was created solely to aid in pubsublite's module
|
||||
carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
|
||||
|
||||
## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045))
|
||||
* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958)
|
||||
* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33))
|
||||
* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf))
|
||||
* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b))
|
||||
* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e))
|
||||
* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0))
|
||||
|
||||
## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935)
|
||||
|
||||
## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) [#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864)
|
||||
* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568))
|
||||
* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72))
|
||||
* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830))
|
||||
* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e))
|
||||
* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d))
|
||||
* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7))
|
||||
* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883)
|
||||
|
||||
## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781)
|
||||
* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93))
|
||||
* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521))
|
||||
* **cloudbuild:** Start generating apiv1/v3 ([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64))
|
||||
* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5))
|
||||
* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893))
|
||||
* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4))
|
||||
* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901))
|
||||
* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639))
|
||||
* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c))
|
||||
* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835)
|
||||
|
||||
|
||||
### Reverts
|
||||
|
||||
* **cloudbuild): "feat(cloudbuild:** Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557))
|
||||
|
||||
## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27)
|
||||
|
||||
|
||||
### Announcements
|
||||
|
||||
The following changes will be included in an upcoming release and are not
|
||||
included in this one.
|
||||
|
||||
#### Default Deadlines
|
||||
|
||||
By default, non-streaming methods, like Create or Get methods, will have a
|
||||
default deadline applied to the context provided at call time, unless a context
|
||||
deadline is already set. Streaming methods have no default deadline and will run
|
||||
indefinitely, unless the context provided at call time contains a deadline.
|
||||
|
||||
To opt-out of this behavior, set the environment variable
|
||||
`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to
|
||||
initializing a client. This opt-out mechanism will be removed in a later
|
||||
release, with a notice similar to this one ahead of its removal.

### Features

* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764)

### Bug Fixes

* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1))
* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d))

## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18)

### Features

* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706)
* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e))

## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05)

### Features

* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04))
* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c))
* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b))
* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601))
* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32))

## v0.62.0

### Announcements

- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was
  merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606).
  This fixed a broken API response for `DiagnoseCluster`. When polling on the
  Long Running Operation(LRO), the API now returns
  `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an
  `error` before.
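
For readers adapting to this change, a minimal sketch of the new polling shape follows (illustrative only, not part of the upstream changelog). It assumes the generated `ClusterControllerClient` surface in `cloud.google.com/go/dataproc/apiv1`; the project, region, and cluster names are placeholders.

```go
package main

import (
	"context"
	"log"

	dataproc "cloud.google.com/go/dataproc/apiv1"
	dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func main() {
	ctx := context.Background()

	client, err := dataproc.NewClusterControllerClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	op, err := client.DiagnoseCluster(ctx, &dataprocpb.DiagnoseClusterRequest{
		ProjectId:   "my-project",  // placeholder
		Region:      "us-central1", // placeholder
		ClusterName: "my-cluster",  // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// After the fix referenced above, Wait returns the DiagnoseClusterResults
	// in addition to an error, instead of only an error.
	results, err := op.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(results.GetOutputUri())
}
```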

### Changes

- all:
  - Updated all direct dependencies.
  - Updated contributing guidelines to suggest allowing edits from maintainers.
- billing/budgets:
  - Start generating client for apiv1beta1.
- functions:
  - Start generating client for apiv1.
- notebooks:
  - Start generating client apiv1beta1.
- profiler:
  - update proftest to support parsing floating-point backoff durations.
  - Fix the regexp used to parse backoff duration.
- Various updates to autogenerated clients.

## v0.61.0

### Changes

- all:
  - Update all direct dependencies.
- dashboard:
  - Start generating client for apiv1.
- policytroubleshooter:
  - Start generating client for apiv1.
- profiler:
  - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`.
- Various updates to autogenerated clients.

## v0.60.0

### Changes

- all:
  - Refactored examples to reduce module dependencies.
  - Update sub-modules to use cloud.google.com/go v0.59.0.
- internal:
  - Start generating client for gaming apiv1beta.
- Various updates to autogenerated clients.

## v0.59.0

### Announcements

@@ -1538,5 +1978,3 @@ Natural Language.
[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
This client uses gRPC as its transport layer, and supports log reading, sinks
and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
77 vendor/cloud.google.com/go/CONTRIBUTING.md generated vendored
@@ -21,21 +25,25 @@
`cd google-cloud-go`

1. Fork the repo.

1. Set your fork as a remote:
   `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`

1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to
   your fork.
1. Make changes, commit to your fork.

   Commit messages should follow the
   [Go project style](https://github.com/golang/go/wiki/CommitMessage). For example:
   [Conventional Commits Style](https://www.conventionalcommits.org). The scope
   portion should always be filled with the name of the package affected by the
   changes being made. For example:
   ```
   functions: add gophers codelab
   feat(functions): add gophers codelab
   ```

1. Send a pull request with your changes.

   To minimize friction, consider setting `Allow edits from maintainers` on the
   PR, which will enable project committers and automation to update your PR.

1. A maintainer will review the pull request and make comments.

   Prefer adding additional commits over amending and force-pushing since it can

@@ -43,7 +47,13 @@

Commits will be squashed when they're merged.

## Integration Tests
## Testing

We test code against two versions of Go, the minimum and maximum versions
supported by our clients. To see which versions these are checkout our
[README](README.md#supported-versions).

### Integration Tests

In addition to the unit tests, you may run the integration test suite. These
directions describe setting up your environment to run integration tests for

@@ -93,7 +103,8 @@ Next, ensure the following APIs are enabled in the general project:
- Google Compute Engine Instance Group Updater API
- Google Compute Engine Instance Groups API
- Kubernetes Engine API
- Stackdriver Error Reporting API
- Cloud Error Reporting API
- Pub/Sub Lite API

Next, create a Datastore database in the general project, and a Firestore
database in the Firestore project.

@@ -118,10 +129,13 @@ project's service account.
  (e.g. doorway-cliff-677) for the Firestore project.
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
  Firestore project's service account.
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.

As part of the setup that follows, the following variables will be configured:

- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
  in the form
  "projects/P/locations/L/keyRings/R". The creation of this is described below.
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.

Install the [gcloud command-line tool][gcloudcli] to your machine and use it to

@@ -140,7 +154,7 @@ $ gcloud auth login
$ gcloud datastore indexes create datastore/testdata/index.yaml

# Creates a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# and with the Cloud Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

@@ -148,7 +162,7 @@ $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
# Creates a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
# as a publisher to that topic.

# Creates a Spanner instance for the spanner integration tests.

@@ -167,7 +181,38 @@ $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --pu
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
```

It may be useful to add exports to your shell initialization for future use.
For instance, in `.zshrc`:

```sh
#### START GO SDK Test Variables
# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project

# The path to the JSON key file of the general project's service account.
export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json

# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project

# The path to the JSON key file of the Firestore project's service account.
export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json

# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
# The creation of this is described below.
export MY_KEYRING=my-golang-sdk-test
export MY_LOCATION=global
export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING

# API key for using the Translate API.
export GCLOUD_TESTS_API_KEY=abcdefghijk123456789

# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
#### END GO SDK Test Variables
```

#### Running

@@ -176,7 +221,15 @@ Once you've done the necessary setup, you can run the integration tests by
running:

``` sh
$ go test -v cloud.google.com/go/...
$ go test -v ./...
```

Note that the above command will not run the tests in other modules. To run
tests on other modules, first navigate to the appropriate
subdirectory. For instance, to run only the tests for datastore:
``` sh
$ cd datastore
$ go test -v ./...
```

#### Replay
103 vendor/cloud.google.com/go/README.md generated vendored
@ -1,6 +1,6 @@
|
|||
# Google Cloud Client Libraries for Go
|
||||
|
||||
[](https://pkg.go.dev/cloud.google.com/go)
|
||||
[](https://pkg.go.dev/cloud.google.com/go)
|
||||
|
||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
||||
|
||||
|
@ -25,52 +25,51 @@ To install the packages on your system, *do not clone the repo*. Instead:
|
|||
**NOTE:** Some of these packages are under development, and may occasionally
|
||||
make backwards-incompatible changes.
|
||||
|
||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
|
||||
|
||||
## Supported APIs
|
||||
|
||||
Google API | Status | Package
|
||||
------------------------------------------------|--------------|-----------------------------------------------------------
|
||||
[Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta)
|
||||
[Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1)
|
||||
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery)
|
||||
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable)
|
||||
[Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1)
|
||||
[Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2)
|
||||
[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1)
|
||||
[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1)
|
||||
[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1)
|
||||
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore)
|
||||
[Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2)
|
||||
[Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2)
|
||||
[Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2)
|
||||
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting)
|
||||
[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore)
|
||||
[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam)
|
||||
[IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1)
|
||||
[IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2)
|
||||
[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1)
|
||||
[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1)
|
||||
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging)
|
||||
[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1)
|
||||
[Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3)
|
||||
[OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1)
|
||||
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub)
|
||||
[Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1)
|
||||
[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1)
|
||||
[Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1)
|
||||
[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1)
|
||||
[Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1)
|
||||
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner)
|
||||
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1)
|
||||
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage)
|
||||
[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1)
|
||||
[Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1)
|
||||
[Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2)
|
||||
[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate)
|
||||
[Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2)
|
||||
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1)
|
||||
[Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1)
|
||||
| Google API | Status | Package |
|
||||
| ----------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||
| [Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) |
|
||||
| [Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) |
|
||||
| [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) |
|
||||
| [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) |
|
||||
| [Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) |
|
||||
| [Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) |
|
||||
| [Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) |
|
||||
| [ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) |
|
||||
| [Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) |
|
||||
| [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) |
|
||||
| [Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) |
|
||||
| [Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) |
|
||||
| [Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) |
|
||||
| [ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) |
|
||||
| [Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) |
|
||||
| [IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) |
|
||||
| [IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) |
|
||||
| [IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) |
|
||||
| [KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) |
|
||||
| [Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) |
|
||||
| [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) |
|
||||
| [Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) |
|
||||
| [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) |
|
||||
| [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) |
|
||||
| [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) |
|
||||
| [Pub/Sub Lite][cloud-pubsublite] | beta | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) |
|
||||
| [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) |
|
||||
| [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) |
|
||||
| [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) |
|
||||
| [Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) |
|
||||
| [Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) |
|
||||
| [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) |
|
||||
| [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) |
|
||||
| [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) |
|
||||
| [Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) |
|
||||
| [Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) |
|
||||
| [Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) |
|
||||
| [Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) |
|
||||
| [Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) |
|
||||
| [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) |
|
||||
| [Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) |
|
||||
|
||||
> **Alpha status**: the API is still being actively developed. As a
|
||||
> result, it might change in backward-incompatible ways and is not recommended
|
||||
|
@ -85,10 +84,9 @@ Google API | Status | Package
|
|||
|
||||
Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go)
|
||||
|
||||
## Go Versions Supported
|
||||
## [Go Versions Supported](#supported-versions)
|
||||
|
||||
We support the two most recent major versions of Go. If Google App Engine uses
|
||||
an older version, we support that as well.
|
||||
We currently support Go versions 1.11 and newer.
|
||||
|
||||
## Authorization
|
||||
|
||||
|
@ -153,6 +151,7 @@ for more information.
|
|||
[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
|
||||
[cloud-kms]: https://cloud.google.com/kms/
|
||||
[cloud-pubsub]: https://cloud.google.com/pubsub/
|
||||
[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
|
||||
[cloud-storage]: https://cloud.google.com/storage/
|
||||
[cloud-language]: https://cloud.google.com/natural-language
|
||||
[cloud-logging]: https://cloud.google.com/logging/
|
||||
|
@ -176,3 +175,11 @@ for more information.
|
|||
[cloud-video]: https://cloud.google.com/video-intelligence/
|
||||
[cloud-vision]: https://cloud.google.com/vision
|
||||
[cloud-webrisk]: https://cloud.google.com/web-risk/
|
||||
|
||||
## Links
|
||||
|
||||
- [Go on Google Cloud](https://cloud.google.com/go/home)
|
||||
- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
|
||||
- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
|
||||
- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
|
||||
- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
|
||||
|
|
119 vendor/cloud.google.com/go/RELEASING.md generated vendored
|
@ -1,25 +1,6 @@
|
|||
# Setup from scratch
|
||||
# Releasing
|
||||
|
||||
1. [Install Go](https://golang.org/dl/).
|
||||
1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
|
||||
is in your `PATH`.
|
||||
1. Check it's working by running `go version`.
|
||||
* If it doesn't work, check the install location, usually
|
||||
`/usr/local/go`, is on your `PATH`.
|
||||
|
||||
1. Sign one of the
|
||||
[contributor license agreements](#contributor-license-agreements) below.
|
||||
|
||||
1. Clone the repo:
|
||||
`git clone https://github.com/googleapis/google-cloud-go`
|
||||
|
||||
1. Change into the checked out source:
|
||||
`cd google-cloud-go`
|
||||
|
||||
1. Fork the repo and add your fork as a secondary remote (this is necessary in
|
||||
order to create PRs).
|
||||
|
||||
# Which module to release?
|
||||
## Determine which module to release
|
||||
|
||||
The Go client libraries have several modules. Each module does not strictly
|
||||
correspond to a single library - they correspond to trees of directories. If a
|
||||
|
@ -27,17 +8,22 @@ file needs to be released, you must release the closest ancestor module.
|
|||
|
||||
To see all modules:
|
||||
|
||||
```
|
||||
```bash
|
||||
$ cat `find . -name go.mod` | grep module
|
||||
module cloud.google.com/go
|
||||
module cloud.google.com/go/bigtable
|
||||
module cloud.google.com/go/firestore
|
||||
module cloud.google.com/go/bigquery
|
||||
module cloud.google.com/go/storage
|
||||
module cloud.google.com/go/datastore
|
||||
module cloud.google.com/go/pubsub
|
||||
module cloud.google.com/go/spanner
|
||||
module cloud.google.com/go
|
||||
module cloud.google.com/go/bigtable
|
||||
module cloud.google.com/go/bigquery
|
||||
module cloud.google.com/go/storage
|
||||
module cloud.google.com/go/pubsublite
|
||||
module cloud.google.com/go/firestore
|
||||
module cloud.google.com/go/logging
|
||||
module cloud.google.com/go/internal/gapicgen
|
||||
module cloud.google.com/go/internal/godocfx
|
||||
module cloud.google.com/go/internal/examples/fake
|
||||
module cloud.google.com/go/internal/examples/mock
|
||||
module cloud.google.com/go/datastore
|
||||
```
|
||||
|
||||
The `cloud.google.com/go` is the repository root module. Each other module is
|
||||
|
@ -53,18 +39,47 @@ of the `cloud.google.com/go` repository root module. Note: releasing
|
|||
`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
|
||||
They are released entirely independently.
|
||||
|
||||
# Test failures
|
||||
## Test failures
|
||||
|
||||
If there are any test failures in the Kokoro build, releases are blocked until
|
||||
the failures have been resolved.
|
||||
|
||||
# How to release `cloud.google.com/go`
|
||||
## How to release
|
||||
|
||||
### Automated Releases (`cloud.google.com/go` and submodules)
|
||||
|
||||
We now use [release-please](https://github.com/googleapis/release-please) to
|
||||
perform automated releases for `cloud.google.com/go` and all submodules.
|
||||
|
||||
1. If there are changes that have not yet been released, a
|
||||
[pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
|
||||
be automatically opened by release-please
|
||||
with a title like "chore: release X.Y.Z" (for the root module) or
|
||||
"chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
|
||||
is the next version to be released. Find the desired pull request
|
||||
[here](https://github.com/googleapis/google-cloud-go/pulls)
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Review the release notes. These are automatically generated from the titles
|
||||
of any merged commits since the previous release. If you would like to edit
|
||||
them, this can be done by updating the changes in the release PR.
|
||||
1. To cut a release, approve and merge the pull request. Doing so will
|
||||
update the `CHANGES.md`, tag the merged commit with the appropriate version,
|
||||
and draft a GitHub release which will copy the notes from `CHANGES.md`.
|
||||
|
||||
### Manual Release (`cloud.google.com/go`)
|
||||
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of `cloud.google.com/go`.
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
|
||||
failures in the most recent build, address them before proceeding with the
|
||||
release.
|
||||
1. Navigate to `~/code/gocloud/` and switch to master.
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release.
|
||||
1. Navigate to `google-cloud-go/` and switch to master.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
|
||||
The current latest tag `$CV` is the largest tag. It should look something
|
||||
|
@ -76,8 +91,11 @@ the failures have been resolved.
|
|||
(the `git log` is going to show you things in submodules, which are not going
|
||||
to be part of your release).
|
||||
1. Edit `CHANGES.md` to include a summary of the changes.
|
||||
1. `cd internal/version && go generate && cd -`
|
||||
1. Commit the changes, push to your fork, and create a PR.
|
||||
1. In `internal/version/version.go`, update `const Repo` to today's date with
|
||||
the format `YYYYMMDD`.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore: release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to master.
|
||||
|
@ -85,24 +103,22 @@ the failures have been resolved.
|
|||
c. Tag the repo with the next version: `git tag $NV`.
|
||||
d. Push the tag to origin:
|
||||
`git push origin $NV`
|
||||
2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `CHANGES.md`.
|
||||
|
||||
# How to release a submodule
|
||||
### Manual Releases (submodules)
|
||||
|
||||
We have several submodules, including `cloud.google.com/go/logging`,
|
||||
`cloud.google.com/go/datastore`, and so on.
|
||||
|
||||
To release a submodule:
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of a submodule.
|
||||
|
||||
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
|
||||
failures in the most recent build, address them before proceeding with the
|
||||
release. (This applies even if the failures are in a different submodule from the one
|
||||
being released.)
|
||||
1. Navigate to `~/code/gocloud/` and switch to master.
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Navigate to `google-cloud-go/` and switch to master.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
|
||||
existing releases. The current latest tag `$CV` is the largest tag. It
|
||||
|
@ -111,8 +127,9 @@ To release a submodule:
|
|||
1. On master, run `git log $CV.. -- datastore/` to list all the changes to the
|
||||
submodule directory since the last release.
|
||||
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
||||
1. `cd internal/version && go generate && cd -`
|
||||
1. Commit the changes, push to your fork, and create a PR.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore(datastore): release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to master.
|
||||
|
@ -122,7 +139,3 @@ To release a submodule:
|
|||
`git push origin $NV`
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `datastore/CHANGES.md`.
|
||||
|
||||
# Appendix
|
||||
|
||||
1: This should get better as submodule tooling matures.
|
||||
|
|
1 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored
|
@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
|||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
suffix = strings.TrimLeft(suffix, "/")
|
||||
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, err := http.NewRequest("GET", u, nil)
|
||||
if err != nil {
|
||||
|
|
29 vendor/cloud.google.com/go/doc.go generated vendored
|
@@ -34,9 +34,18 @@ in this package for details.

Timeouts and Cancellation

By default, all requests in sub-packages will run indefinitely, retrying on transient
errors when correctness allows. To set timeouts or arrange for cancellation, use
contexts. See the examples for details.
By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
context provided at call time, unless a context deadline is already set. Streaming
methods have no default deadline and will run indefinitely. To set timeouts or
arrange for cancellation, use contexts. See the examples for details. Transient
errors will be retried when correctness allows.

To opt out of default deadlines, set the temporary environment variable
GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
creation. This affects all Google Cloud Go client libraries. This opt-out
mechanism will be removed in a future release. File an issue at
https://github.com/googleapis/google-cloud-go if the default deadlines
cannot work for you.

Do not attempt to control the initial connection (dialing) of a service by setting a
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts

@@ -76,6 +85,20 @@ https://godoc.org/google.golang.org/grpc/grpclog for more information.
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".


Inspecting errors

Most of the errors returned by the generated clients can be converted into a
`grpc.Status`. Converting your errors to this type can be a useful way to get
more information about what went wrong while debugging.
	if err != nil {
		if s, ok := status.FromError(err); ok {
			log.Println(s.Message())
			for _, d := range s.Proto().Details {
				log.Println(d)
			}
		}
	}

Client Stability

Clients in this repository are considered alpha or beta unless otherwise
|
2 vendor/cloud.google.com/go/internal/version/version.go generated vendored
|
@@ -26,7 +26,7 @@ import (

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20200618"
const Repo = "20201104"

// Go returns the Go runtime version. The returned string
// has no whitespace.
|
61 vendor/cloud.google.com/go/logging/CHANGES.md generated vendored
|
@ -1,6 +1,65 @@
|
|||
# Changes
|
||||
|
||||
### [1.4.2](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.4.1...logging/v1.4.2) (2021-05-20)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **logging:** correctly detect GKE resource ([#4092](https://www.github.com/googleapis/google-cloud-go/issues/4092)) ([a2538e1](https://www.github.com/googleapis/google-cloud-go/commit/a2538e16123c21da62036b56df8c104360f1c2d6))
|
||||
|
||||
### [1.4.1](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.4.0...logging/v1.4.1) (2021-05-03)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **logging:** allow nil or custom zones in resource detection ([#3997](https://www.github.com/googleapis/google-cloud-go/issues/3997)) ([aded90b](https://www.github.com/googleapis/google-cloud-go/commit/aded90b92de3fa3bed079af1aa4879d00572e8ae))
|
||||
* **logging:** appengine zone label ([#3998](https://www.github.com/googleapis/google-cloud-go/issues/3998)) ([394a586](https://www.github.com/googleapis/google-cloud-go/commit/394a586bac04953e92a6496a7ca3b61bd64155ab))
|
||||
|
||||
## [1.4.0](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.2.0...logging/v1.4.0) (2021-04-15)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **logging:** cloud run and functions resource autodetection ([#3909](https://www.github.com/googleapis/google-cloud-go/issues/3909)) ([1204de8](https://www.github.com/googleapis/google-cloud-go/commit/1204de85e58334bf93fecdcb0ab8b581449c2745))
|
||||
* **logging:** make toLogEntry function public ([#3863](https://www.github.com/googleapis/google-cloud-go/issues/3863)) ([71828c2](https://www.github.com/googleapis/google-cloud-go/commit/71828c28d424c34da6d0392651739a364cd57e79))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **logging:** Entries has a 24H default filter ([#3120](https://www.github.com/googleapis/google-cloud-go/issues/3120)) ([b32eb82](https://www.github.com/googleapis/google-cloud-go/commit/b32eb822d17838bde91c610a5a9d392d325a592d))
|
||||
|
||||
## v1.3.0
|
||||
|
||||
- Updates to various dependencies.
|
||||
|
||||
## [1.2.0](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.1.2...v1.2.0) (2021-01-25)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **logging:** add localIP and Cache fields to HTTPRequest conversion from proto ([#3600](https://www.github.com/googleapis/google-cloud-go/issues/3600)) ([f93027b](https://www.github.com/googleapis/google-cloud-go/commit/f93027b47735e7c181989666e0826bea57ec51e1))
|
||||
|
||||
### [1.1.2](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.1.1...v1.1.2) (2020-11-09)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **logging:** allow X-Cloud-Trace-Context fields to be optional ([#3062](https://www.github.com/googleapis/google-cloud-go/issues/3062)) ([7ff03cf](https://www.github.com/googleapis/google-cloud-go/commit/7ff03cf9a544e753de5b034e18339ecf517d2193))
|
||||
* **logging:** do not panic in library code ([#3076](https://www.github.com/googleapis/google-cloud-go/issues/3076)) ([529be97](https://www.github.com/googleapis/google-cloud-go/commit/529be977f766443f49cb8914e17ba07c93841e84)), closes [#1862](https://www.github.com/googleapis/google-cloud-go/issues/1862)
|
||||
|
||||
## v1.1.1
|
||||
|
||||
- Rebrand "Stackdriver Logging" to "Cloud Logging".
|
||||
|
||||
## v1.1.0
|
||||
|
||||
- Support unmarshalling stringified Severity.
|
||||
- Add exported SetGoogleClientInfo wrappers to manual file.
|
||||
- Support no payload.
|
||||
- Update "Grouping Logs by Request" docs.
|
||||
- Add auto-detection of monitored resources on GAE Standard.
|
||||
|
||||
## v1.0.0
|
||||
|
||||
This is the first tag to carve out logging as its own module. See:
|
||||
https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
|
||||
https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
|
||||
|
|
17 vendor/cloud.google.com/go/logging/README.md generated vendored
|
@ -1,25 +1,27 @@
|
|||
## Stackdriver Logging [](https://godoc.org/cloud.google.com/go/logging)
|
||||
## Cloud Logging [](https://pkg.go.dev/cloud.google.com/go/logging)
|
||||
|
||||
- [About Stackdriver Logging](https://cloud.google.com/logging/)
|
||||
- [About Cloud Logging](https://cloud.google.com/logging/)
|
||||
- [API documentation](https://cloud.google.com/logging/docs)
|
||||
- [Go client documentation](https://godoc.org/cloud.google.com/go/logging)
|
||||
- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/logging)
|
||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
|
||||
|
||||
### Example Usage
|
||||
|
||||
First create a `logging.Client` to use throughout your application:
|
||||
[snip]:# (logging-1)
|
||||
|
||||
```go
|
||||
ctx := context.Background()
|
||||
client, err := logging.NewClient(ctx, "my-project")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
```
|
||||
|
||||
Usually, you'll want to add log entries to a buffer to be periodically flushed
|
||||
(automatically and asynchronously) to the Stackdriver Logging service.
|
||||
(automatically and asynchronously) to the Cloud Logging service.
|
||||
[snip]:# (logging-2)
|
||||
|
||||
```go
|
||||
logger := client.Logger("my-log")
|
||||
logger.Log(logging.Entry{Payload: "something happened!"})
|
||||
|
@ -27,9 +29,10 @@ logger.Log(logging.Entry{Payload: "something happened!"})
|
|||
|
||||
Close your client before your program exits, to flush any buffered log entries.
|
||||
[snip]:# (logging-3)
|
||||
|
||||
```go
|
||||
err = client.Close()
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
// TODO: Handle error.
|
||||
}
|
||||
```
|
||||
```
|
||||
|
|
678 vendor/cloud.google.com/go/logging/apiv2/config_client.go generated vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2019 Google LLC
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,7 +12,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by gapic-generator. DO NOT EDIT.
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package logging
|
||||
|
||||
|
@ -27,37 +27,69 @@ import (
|
|||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
loggingpb "google.golang.org/genproto/googleapis/logging/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var newConfigClientHook clientHook
|
||||
|
||||
// ConfigCallOptions contains the retry settings for each method of ConfigClient.
|
||||
type ConfigCallOptions struct {
|
||||
ListSinks []gax.CallOption
|
||||
GetSink []gax.CallOption
|
||||
CreateSink []gax.CallOption
|
||||
UpdateSink []gax.CallOption
|
||||
DeleteSink []gax.CallOption
|
||||
ListExclusions []gax.CallOption
|
||||
GetExclusion []gax.CallOption
|
||||
CreateExclusion []gax.CallOption
|
||||
UpdateExclusion []gax.CallOption
|
||||
DeleteExclusion []gax.CallOption
|
||||
ListBuckets []gax.CallOption
|
||||
GetBucket []gax.CallOption
|
||||
CreateBucket []gax.CallOption
|
||||
UpdateBucket []gax.CallOption
|
||||
DeleteBucket []gax.CallOption
|
||||
UndeleteBucket []gax.CallOption
|
||||
ListViews []gax.CallOption
|
||||
GetView []gax.CallOption
|
||||
CreateView []gax.CallOption
|
||||
UpdateView []gax.CallOption
|
||||
DeleteView []gax.CallOption
|
||||
ListSinks []gax.CallOption
|
||||
GetSink []gax.CallOption
|
||||
CreateSink []gax.CallOption
|
||||
UpdateSink []gax.CallOption
|
||||
DeleteSink []gax.CallOption
|
||||
ListExclusions []gax.CallOption
|
||||
GetExclusion []gax.CallOption
|
||||
CreateExclusion []gax.CallOption
|
||||
UpdateExclusion []gax.CallOption
|
||||
DeleteExclusion []gax.CallOption
|
||||
GetCmekSettings []gax.CallOption
|
||||
UpdateCmekSettings []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultConfigClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("logging.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultConfigCallOptions() *ConfigCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
return &ConfigCallOptions{
|
||||
ListBuckets: []gax.CallOption{},
|
||||
GetBucket: []gax.CallOption{},
|
||||
CreateBucket: []gax.CallOption{},
|
||||
UpdateBucket: []gax.CallOption{},
|
||||
DeleteBucket: []gax.CallOption{},
|
||||
UndeleteBucket: []gax.CallOption{},
|
||||
ListViews: []gax.CallOption{},
|
||||
GetView: []gax.CallOption{},
|
||||
CreateView: []gax.CallOption{},
|
||||
UpdateView: []gax.CallOption{},
|
||||
DeleteView: []gax.CallOption{},
|
||||
ListSinks: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
|
@ -66,31 +98,105 @@ func defaultConfigCallOptions() *ConfigCallOptions {
|
|||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &ConfigCallOptions{
|
||||
ListSinks: retry[[2]string{"default", "idempotent"}],
|
||||
GetSink: retry[[2]string{"default", "idempotent"}],
|
||||
CreateSink: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateSink: retry[[2]string{"default", "idempotent"}],
|
||||
DeleteSink: retry[[2]string{"default", "idempotent"}],
|
||||
ListExclusions: retry[[2]string{"default", "idempotent"}],
|
||||
GetExclusion: retry[[2]string{"default", "idempotent"}],
|
||||
CreateExclusion: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateExclusion: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteExclusion: retry[[2]string{"default", "idempotent"}],
|
||||
GetSink: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateSink: []gax.CallOption{},
|
||||
UpdateSink: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
DeleteSink: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListExclusions: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetExclusion: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateExclusion: []gax.CallOption{},
|
||||
UpdateExclusion: []gax.CallOption{},
|
||||
DeleteExclusion: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetCmekSettings: []gax.CallOption{},
|
||||
UpdateCmekSettings: []gax.CallOption{},
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigClient is a client for interacting with Stackdriver Logging API.
|
||||
// ConfigClient is a client for interacting with Cloud Logging API.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type ConfigClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
|
||||
disableDeadlines bool
|
||||
|
||||
// The gRPC API client.
|
||||
configClient loggingpb.ConfigServiceV2Client
|
||||
|
@ -104,43 +210,300 @@ type ConfigClient struct {
|
|||
|
||||
// NewConfigClient creates a new config service v2 client.
|
||||
//
|
||||
// Service for configuring sinks used to export log entries out of
|
||||
// Logging.
|
||||
// Service for configuring sinks used to route log entries.
|
||||
func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...)
|
||||
clientOpts := defaultConfigClientOptions()
|
||||
|
||||
if newConfigClientHook != nil {
|
||||
hookOpts, err := newConfigClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
disableDeadlines, err := checkDisableDeadlines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &ConfigClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultConfigCallOptions(),
|
||||
connPool: connPool,
|
||||
disableDeadlines: disableDeadlines,
|
||||
CallOptions: defaultConfigCallOptions(),
|
||||
|
||||
configClient: loggingpb.NewConfigServiceV2Client(conn),
|
||||
configClient: loggingpb.NewConfigServiceV2Client(connPool),
|
||||
}
|
||||
c.SetGoogleClientInfo()
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
func (c *ConfigClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *ConfigClient) Close() error {
|
||||
return c.conn.Close()
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
|
||||
func (c *ConfigClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
||||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// ListBuckets lists buckets.
|
||||
func (c *ConfigClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucketsRequest, opts ...gax.CallOption) *LogBucketIterator {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.ListBuckets[0:len(c.CallOptions.ListBuckets):len(c.CallOptions.ListBuckets)], opts...)
|
||||
it := &LogBucketIterator{}
|
||||
req = proto.Clone(req).(*loggingpb.ListBucketsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogBucket, string, error) {
|
||||
var resp *loggingpb.ListBucketsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.ListBuckets(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetBuckets(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// GetBucket gets a bucket.
|
||||
func (c *ConfigClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetBucket[0:len(c.CallOptions.GetBucket):len(c.CallOptions.GetBucket)], opts...)
|
||||
var resp *loggingpb.LogBucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.GetBucket(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateBucket creates a bucket that can be used to store log entries. Once a bucket has
|
||||
// been created, the region cannot be changed.
|
||||
func (c *ConfigClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.CreateBucket[0:len(c.CallOptions.CreateBucket):len(c.CallOptions.CreateBucket)], opts...)
|
||||
var resp *loggingpb.LogBucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.CreateBucket(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateBucket updates a bucket. This method replaces the following fields in the
|
||||
// existing bucket with values from the new bucket: retention_period
|
||||
//
|
||||
// If the retention period is decreased and the bucket is locked,
|
||||
// FAILED_PRECONDITION will be returned.
|
||||
//
|
||||
// If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION
|
||||
// will be returned.
|
||||
//
|
||||
// A buckets region may not be modified after it is created.
|
||||
func (c *ConfigClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateBucket[0:len(c.CallOptions.UpdateBucket):len(c.CallOptions.UpdateBucket)], opts...)
|
||||
var resp *loggingpb.LogBucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.UpdateBucket(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket.
|
||||
// Moves the bucket to the DELETE_REQUESTED state. After 7 days, the
|
||||
// bucket will be purged and all logs in the bucket will be permanently
|
||||
// deleted.
|
||||
func (c *ConfigClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBucketRequest, opts ...gax.CallOption) error {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteBucket[0:len(c.CallOptions.DeleteBucket):len(c.CallOptions.DeleteBucket)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.configClient.DeleteBucket(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// UndeleteBucket undeletes a bucket. A bucket that has been deleted may be undeleted within
|
||||
// the grace period of 7 days.
|
||||
func (c *ConfigClient) UndeleteBucket(ctx context.Context, req *loggingpb.UndeleteBucketRequest, opts ...gax.CallOption) error {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UndeleteBucket[0:len(c.CallOptions.UndeleteBucket):len(c.CallOptions.UndeleteBucket)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.configClient.UndeleteBucket(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// ListViews lists views on a bucket.
|
||||
func (c *ConfigClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRequest, opts ...gax.CallOption) *LogViewIterator {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.ListViews[0:len(c.CallOptions.ListViews):len(c.CallOptions.ListViews)], opts...)
|
||||
it := &LogViewIterator{}
|
||||
req = proto.Clone(req).(*loggingpb.ListViewsRequest)
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogView, string, error) {
|
||||
var resp *loggingpb.ListViewsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.ListViews(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetViews(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// GetView gets a view.
|
||||
func (c *ConfigClient) GetView(ctx context.Context, req *loggingpb.GetViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetView[0:len(c.CallOptions.GetView):len(c.CallOptions.GetView)], opts...)
|
||||
var resp *loggingpb.LogView
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.GetView(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateView creates a view over logs in a bucket. A bucket may contain a maximum of
|
||||
// 50 views.
|
||||
func (c *ConfigClient) CreateView(ctx context.Context, req *loggingpb.CreateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.CreateView[0:len(c.CallOptions.CreateView):len(c.CallOptions.CreateView)], opts...)
|
||||
var resp *loggingpb.LogView
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.CreateView(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateView updates a view. This method replaces the following fields in the existing
|
||||
// view with values from the new view: filter.
|
||||
func (c *ConfigClient) UpdateView(ctx context.Context, req *loggingpb.UpdateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateView[0:len(c.CallOptions.UpdateView):len(c.CallOptions.UpdateView)], opts...)
|
||||
var resp *loggingpb.LogView
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.UpdateView(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteView deletes a view from a bucket.
|
||||
func (c *ConfigClient) DeleteView(ctx context.Context, req *loggingpb.DeleteViewRequest, opts ...gax.CallOption) error {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteView[0:len(c.CallOptions.DeleteView):len(c.CallOptions.DeleteView)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.configClient.DeleteView(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// ListSinks lists sinks.
|
||||
func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
|
@ -164,7 +527,9 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Sinks, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetSinks(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -175,13 +540,18 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// GetSink gets a sink.
|
||||
func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...)
|
||||
|
@ -197,11 +567,16 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateSink creates a sink that exports specified log entries to a destination. The
|
||||
// export of newly-ingested log entries begins immediately, unless the sink's
|
||||
// writer_identity is not permitted to write to the destination. A sink can
|
||||
// CreateSink creates a sink that exports specified log entries to a destination. The
|
||||
// export of newly-ingested log entries begins immediately, unless the sink’s
|
||||
// writer_identity is not permitted to write to the destination. A sink can
|
||||
// export log entries only from the resource owning the sink.
|
||||
func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...)
|
||||
|
@ -217,11 +592,17 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateSink updates a sink. This method replaces the following fields in the existing
|
||||
// UpdateSink updates a sink. This method replaces the following fields in the existing
|
||||
// sink with values from the new sink: destination, and filter.
|
||||
//
|
||||
// The updated sink might also have a new writer_identity; see the
|
||||
// unique_writer_identity field.
|
||||
func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
|
||||
|
@ -240,6 +621,11 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
|
|||
// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
|
||||
// service account is also deleted.
|
||||
func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...)
|
||||
|
@ -274,7 +660,9 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Exclusions, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetExclusions(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -285,13 +673,18 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// GetExclusion gets the description of an exclusion.
|
||||
func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...)
|
||||
|
@ -311,6 +704,11 @@ func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclu
|
|||
// Only log entries belonging to that resource can be excluded.
|
||||
// You can have up to 10 exclusions in a resource.
|
||||
func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...)
|
||||
|
@ -328,6 +726,11 @@ func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.Creat
|
|||
|
||||
// UpdateExclusion changes one or more properties of an existing exclusion.
|
||||
func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...)
|
||||
|
@ -345,6 +748,11 @@ func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.Updat
|
|||
|
||||
// DeleteExclusion deletes an exclusion.
|
||||
func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...)
|
||||
|
@ -356,12 +764,120 @@ func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.Delet
|
|||
return err
|
||||
}
|
||||
|
||||
// GetCmekSettings gets the Logs Router CMEK settings for the given resource.
|
||||
//
|
||||
// Note: CMEK for the Logs Router can currently only be configured for GCP
|
||||
// organizations. Once configured, it applies to all projects and folders in
|
||||
// the GCP organization.
|
||||
//
|
||||
// See Enabling CMEK for Logs
|
||||
// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption)
|
||||
// for more information.
|
||||
func (c *ConfigClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetCmekSettings[0:len(c.CallOptions.GetCmekSettings):len(c.CallOptions.GetCmekSettings)], opts...)
|
||||
var resp *loggingpb.CmekSettings
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.GetCmekSettings(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateCmekSettings updates the Logs Router CMEK settings for the given resource.
|
||||
//
|
||||
// Note: CMEK for the Logs Router can currently only be configured for GCP
|
||||
// organizations. Once configured, it applies to all projects and folders in
|
||||
// the GCP organization.
|
||||
//
|
||||
// UpdateCmekSettings
|
||||
// will fail if 1) kms_key_name is invalid, or 2) the associated service
|
||||
// account does not have the required
|
||||
// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or
|
||||
// 3) access to the key is disabled.
|
||||
//
|
||||
// See Enabling CMEK for Logs
|
||||
// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption)
|
||||
// for more information.
|
||||
func (c *ConfigClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.UpdateCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateCmekSettings[0:len(c.CallOptions.UpdateCmekSettings):len(c.CallOptions.UpdateCmekSettings)], opts...)
|
||||
var resp *loggingpb.CmekSettings
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.configClient.UpdateCmekSettings(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
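For orientation, a minimal caller-side sketch of the CMEK accessor above. This is not part of the vendored change: the organization resource name is a placeholder, and a ConfigClient plus context are assumed to be in scope (a full construction sketch appears further below).

// Sketch: read the Logs Router CMEK settings for an organization.
// "organizations/123456789" is a placeholder resource name.
settings, err := configClient.GetCmekSettings(ctx, &loggingpb.GetCmekSettingsRequest{
	Name: "organizations/123456789/cmekSettings",
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(settings.GetKmsKeyName())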
||||
// LogBucketIterator manages a stream of *loggingpb.LogBucket.
|
||||
type LogBucketIterator struct {
|
||||
items []*loggingpb.LogBucket
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogBucket, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *LogBucketIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *LogBucketIterator) Next() (*loggingpb.LogBucket, error) {
|
||||
var item *loggingpb.LogBucket
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *LogBucketIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *LogBucketIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// LogExclusionIterator manages a stream of *loggingpb.LogExclusion.
|
||||
type LogExclusionIterator struct {
|
||||
items []*loggingpb.LogExclusion
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
@ -404,6 +920,11 @@ type LogSinkIterator struct {
|
|||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
@ -439,3 +960,50 @@ func (it *LogSinkIterator) takeBuf() interface{} {
|
|||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// LogViewIterator manages a stream of *loggingpb.LogView.
|
||||
type LogViewIterator struct {
|
||||
items []*loggingpb.LogView
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogView, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *LogViewIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *LogViewIterator) Next() (*loggingpb.LogView, error) {
|
||||
var item *loggingpb.LogView
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *LogViewIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *LogViewIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
|
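Since config_client.go above is generated, a short end-to-end usage sketch may help tie the pieces together; it is illustrative only (project and location names are placeholders, and Application Default Credentials are assumed), not part of the vendored change.

package main

import (
	"context"
	"fmt"
	"log"

	logging "cloud.google.com/go/logging/apiv2"
	"google.golang.org/api/iterator"
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
)

func main() {
	ctx := context.Background()

	// NewConfigClient dials the Logging API with the defaults shown above.
	c, err := logging.NewConfigClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// ListBuckets returns a LogBucketIterator; Next yields iterator.Done
	// once the pages are exhausted.
	it := c.ListBuckets(ctx, &loggingpb.ListBucketsRequest{
		Parent: "projects/my-project/locations/global", // placeholder parent
	})
	for {
		bucket, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(bucket.GetName())
	}
}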
41 vendor/cloud.google.com/go/logging/apiv2/doc.go generated vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2019 Google LLC
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,14 +12,15 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by gapic-generator. DO NOT EDIT.
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
// Package logging is an auto-generated package for the
|
||||
// Stackdriver Logging API.
|
||||
// Cloud Logging API.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Writes log entries and manages your Logging configuration.
|
||||
// Writes log entries and manages your Cloud Logging configuration. The table
|
||||
// entries below are presented in alphabetical order, not in order of common
|
||||
// use. For explanations of the concepts found in the table entries, read the
|
||||
// documentation at https://cloud.google.com/logging/docs.
|
||||
//
|
||||
// Use of Context
|
||||
//
|
||||
|
@ -30,20 +31,28 @@
|
|||
// To close the open connection, use the Close() method.
|
||||
//
|
||||
// For information about setting deadlines, reusing contexts, and more
|
||||
// please visit godoc.org/cloud.google.com/go.
|
||||
//
|
||||
// Use the client at cloud.google.com/go/logging in preference to this.
|
||||
// please visit pkg.go.dev/cloud.google.com/go.
|
||||
package logging // import "cloud.google.com/go/logging/apiv2"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
const versionClient = "20210518"
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
|
@ -55,6 +64,16 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
|||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
func checkDisableDeadlines() (bool, error) {
|
||||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b, err := strconv.ParseBool(raw)
|
||||
return b, err
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
|
@ -81,7 +100,7 @@ func versionGo() string {
|
|||
}
|
||||
|
||||
notSemverRune := func(r rune) bool {
|
||||
return strings.IndexRune("0123456789.", r) < 0
|
||||
return !strings.ContainsRune("0123456789.", r)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
|
@ -102,5 +121,3 @@ func versionGo() string {
|
|||
}
|
||||
return "UNKNOWN"
|
||||
}
|
||||
|
||||
const versionClient = "20190801"
|
||||
|
|
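checkDisableDeadlines above only consults GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE; the other way to avoid the generated per-method timeouts is to pass a context that already carries a deadline. A sketch under that assumption (the 5-second value and sink name are illustrative, and the ConfigClient comes from the earlier construction sketch):

// A caller-supplied deadline takes the place of the generated default
// (e.g. the 60000*time.Millisecond timeout on GetSink shown above).
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

sink, err := configClient.GetSink(ctx, &loggingpb.GetSinkRequest{
	SinkName: "projects/my-project/sinks/my-sink", // placeholder sink name
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(sink.GetDestination())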
206 vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json generated vendored Normal file
|
@ -0,0 +1,206 @@
|
|||
{
|
||||
"schema": "1.0",
|
||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
||||
"language": "go",
|
||||
"protoPackage": "google.logging.v2",
|
||||
"libraryPackage": "cloud.google.com/go/logging/apiv2",
|
||||
"services": {
|
||||
"ConfigServiceV2": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "ConfigClient",
|
||||
"rpcs": {
|
||||
"CreateBucket": {
|
||||
"methods": [
|
||||
"CreateBucket"
|
||||
]
|
||||
},
|
||||
"CreateExclusion": {
|
||||
"methods": [
|
||||
"CreateExclusion"
|
||||
]
|
||||
},
|
||||
"CreateSink": {
|
||||
"methods": [
|
||||
"CreateSink"
|
||||
]
|
||||
},
|
||||
"CreateView": {
|
||||
"methods": [
|
||||
"CreateView"
|
||||
]
|
||||
},
|
||||
"DeleteBucket": {
|
||||
"methods": [
|
||||
"DeleteBucket"
|
||||
]
|
||||
},
|
||||
"DeleteExclusion": {
|
||||
"methods": [
|
||||
"DeleteExclusion"
|
||||
]
|
||||
},
|
||||
"DeleteSink": {
|
||||
"methods": [
|
||||
"DeleteSink"
|
||||
]
|
||||
},
|
||||
"DeleteView": {
|
||||
"methods": [
|
||||
"DeleteView"
|
||||
]
|
||||
},
|
||||
"GetBucket": {
|
||||
"methods": [
|
||||
"GetBucket"
|
||||
]
|
||||
},
|
||||
"GetCmekSettings": {
|
||||
"methods": [
|
||||
"GetCmekSettings"
|
||||
]
|
||||
},
|
||||
"GetExclusion": {
|
||||
"methods": [
|
||||
"GetExclusion"
|
||||
]
|
||||
},
|
||||
"GetSink": {
|
||||
"methods": [
|
||||
"GetSink"
|
||||
]
|
||||
},
|
||||
"GetView": {
|
||||
"methods": [
|
||||
"GetView"
|
||||
]
|
||||
},
|
||||
"ListBuckets": {
|
||||
"methods": [
|
||||
"ListBuckets"
|
||||
]
|
||||
},
|
||||
"ListExclusions": {
|
||||
"methods": [
|
||||
"ListExclusions"
|
||||
]
|
||||
},
|
||||
"ListSinks": {
|
||||
"methods": [
|
||||
"ListSinks"
|
||||
]
|
||||
},
|
||||
"ListViews": {
|
||||
"methods": [
|
||||
"ListViews"
|
||||
]
|
||||
},
|
||||
"UndeleteBucket": {
|
||||
"methods": [
|
||||
"UndeleteBucket"
|
||||
]
|
||||
},
|
||||
"UpdateBucket": {
|
||||
"methods": [
|
||||
"UpdateBucket"
|
||||
]
|
||||
},
|
||||
"UpdateCmekSettings": {
|
||||
"methods": [
|
||||
"UpdateCmekSettings"
|
||||
]
|
||||
},
|
||||
"UpdateExclusion": {
|
||||
"methods": [
|
||||
"UpdateExclusion"
|
||||
]
|
||||
},
|
||||
"UpdateSink": {
|
||||
"methods": [
|
||||
"UpdateSink"
|
||||
]
|
||||
},
|
||||
"UpdateView": {
|
||||
"methods": [
|
||||
"UpdateView"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"LoggingServiceV2": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"DeleteLog": {
|
||||
"methods": [
|
||||
"DeleteLog"
|
||||
]
|
||||
},
|
||||
"ListLogEntries": {
|
||||
"methods": [
|
||||
"ListLogEntries"
|
||||
]
|
||||
},
|
||||
"ListLogs": {
|
||||
"methods": [
|
||||
"ListLogs"
|
||||
]
|
||||
},
|
||||
"ListMonitoredResourceDescriptors": {
|
||||
"methods": [
|
||||
"ListMonitoredResourceDescriptors"
|
||||
]
|
||||
},
|
||||
"TailLogEntries": {
|
||||
"methods": [
|
||||
"TailLogEntries"
|
||||
]
|
||||
},
|
||||
"WriteLogEntries": {
|
||||
"methods": [
|
||||
"WriteLogEntries"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"MetricsServiceV2": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "MetricsClient",
|
||||
"rpcs": {
|
||||
"CreateLogMetric": {
|
||||
"methods": [
|
||||
"CreateLogMetric"
|
||||
]
|
||||
},
|
||||
"DeleteLogMetric": {
|
||||
"methods": [
|
||||
"DeleteLogMetric"
|
||||
]
|
||||
},
|
||||
"GetLogMetric": {
|
||||
"methods": [
|
||||
"GetLogMetric"
|
||||
]
|
||||
},
|
||||
"ListLogMetrics": {
|
||||
"methods": [
|
||||
"ListLogMetrics"
|
||||
]
|
||||
},
|
||||
"UpdateLogMetric": {
|
||||
"methods": [
|
||||
"UpdateLogMetric"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
42 vendor/cloud.google.com/go/logging/apiv2/info.go generated vendored Normal file
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logging
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Also passes any
|
||||
// provided key-value pairs. Intended for use by Google-written clients.
|
||||
//
|
||||
// Internal use only.
|
||||
func (c *Client) SetGoogleClientInfo(keyval ...string) {
|
||||
c.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Also passes any
|
||||
// provided key-value pairs. Intended for use by Google-written clients.
|
||||
//
|
||||
// Internal use only.
|
||||
func (mc *MetricsClient) SetGoogleClientInfo(keyval ...string) {
|
||||
mc.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Also passes any
|
||||
// provided key-value pairs. Intended for use by Google-written clients.
|
||||
//
|
||||
// Internal use only.
|
||||
func (cc *ConfigClient) SetGoogleClientInfo(keyval ...string) {
|
||||
cc.setGoogleClientInfo(keyval...)
|
||||
}
|
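info.go above re-exports SetGoogleClientInfo on each client type so an application can label itself in the x-goog-api-client header. A small sketch (the name/version pair is a placeholder):

func newLabelledClient(ctx context.Context) (*logging.Client, error) {
	c, err := logging.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	// The pair is appended to the gl-go/gapic/gax/grpc entries that
	// setGoogleClientInfo already builds into the header.
	c.SetGoogleClientInfo("my-application", "v1.2.3") // placeholder key/value
	return c, nil
}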
226 vendor/cloud.google.com/go/logging/apiv2/logging_client.go generated vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2019 Google LLC
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,7 +12,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by gapic-generator. DO NOT EDIT.
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package logging
|
||||
|
||||
|
@ -27,7 +27,8 @@ import (
|
|||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
loggingpb "google.golang.org/genproto/googleapis/logging/v2"
|
||||
"google.golang.org/grpc"
|
||||
|
@ -35,6 +36,8 @@ import (
|
|||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var newClientHook clientHook
|
||||
|
||||
// CallOptions contains the retry settings for each method of Client.
|
||||
type CallOptions struct {
|
||||
DeleteLog []gax.CallOption
|
||||
|
@ -42,18 +45,24 @@ type CallOptions struct {
|
|||
ListLogEntries []gax.CallOption
|
||||
ListMonitoredResourceDescriptors []gax.CallOption
|
||||
ListLogs []gax.CallOption
|
||||
TailLogEntries []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("logging.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultCallOptions() *CallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
return &CallOptions{
|
||||
DeleteLog: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
|
@ -62,26 +71,87 @@ func defaultCallOptions() *CallOptions {
|
|||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
WriteLogEntries: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListLogEntries: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListMonitoredResourceDescriptors: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
ListLogs: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
TailLogEntries: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &CallOptions{
|
||||
DeleteLog: retry[[2]string{"default", "idempotent"}],
|
||||
WriteLogEntries: retry[[2]string{"default", "idempotent"}],
|
||||
ListLogEntries: retry[[2]string{"default", "idempotent"}],
|
||||
ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
|
||||
ListLogs: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
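defaultCallOptions above wires a gax retryer per method; because each method appends its variadic opts after those defaults, a caller can layer its own gax.WithRetry onto a single call. A sketch under that assumption (the backoff numbers and log name are illustrative):

// Override the generated retry policy for one DeleteLog call.
err := client.DeleteLog(ctx, &loggingpb.DeleteLogRequest{
	LogName: "projects/my-project/logs/my-log", // placeholder log name
}, gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    200 * time.Millisecond,
		Max:        10 * time.Second,
		Multiplier: 2.0,
	})
}))
if err != nil {
	log.Fatal(err)
}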
// Client is a client for interacting with Stackdriver Logging API.
|
||||
// Client is a client for interacting with Cloud Logging API.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type Client struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
|
||||
disableDeadlines bool
|
||||
|
||||
// The gRPC API client.
|
||||
client loggingpb.LoggingServiceV2Client
|
||||
|
@ -97,45 +167,69 @@ type Client struct {
|
|||
//
|
||||
// Service for ingesting and querying logs.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
|
||||
clientOpts := defaultClientOptions()
|
||||
|
||||
if newClientHook != nil {
|
||||
hookOpts, err := newClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
disableDeadlines, err := checkDisableDeadlines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
conn: conn,
|
||||
CallOptions: defaultCallOptions(),
|
||||
connPool: connPool,
|
||||
disableDeadlines: disableDeadlines,
|
||||
CallOptions: defaultCallOptions(),
|
||||
|
||||
client: loggingpb.NewLoggingServiceV2Client(conn),
|
||||
client: loggingpb.NewLoggingServiceV2Client(connPool),
|
||||
}
|
||||
c.SetGoogleClientInfo()
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
func (c *Client) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *Client) SetGoogleClientInfo(keyval ...string) {
|
||||
func (c *Client) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
||||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// DeleteLog deletes all the log entries in a log.
|
||||
// The log reappears if it receives new entries.
|
||||
// Log entries written shortly before the delete operation might not be
|
||||
// deleted.
|
||||
// DeleteLog deletes all the log entries in a log. The log reappears if it receives new
|
||||
// entries. Log entries written shortly before the delete operation might not
|
||||
// be deleted. Entries received after the delete operation with a timestamp
|
||||
// before the operation will be deleted.
|
||||
func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "log_name", url.QueryEscape(req.GetLogName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
|
||||
|
@ -155,6 +249,11 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest,
|
|||
// different resources (projects, organizations, billing accounts or
|
||||
// folders)
|
||||
func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
|
||||
var resp *loggingpb.WriteLogEntriesResponse
|
||||
|
@ -169,9 +268,10 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// ListLogEntries lists log entries. Use this method to retrieve log entries from
|
||||
// Logging. For ways to export log entries, see
|
||||
// Exporting Logs (at /logging/docs/export).
|
||||
// ListLogEntries lists log entries. Use this method to retrieve log entries that originated
|
||||
// from a project/folder/organization/billing account. For ways to export log
|
||||
// entries, see Exporting
|
||||
// Logs (at https://cloud.google.com/logging/docs/export).
|
||||
func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
|
||||
|
@ -193,7 +293,9 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Entries, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetEntries(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -204,8 +306,8 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
|
@ -231,7 +333,9 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.ResourceDescriptors, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -242,8 +346,8 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
|
@ -271,7 +375,9 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.LogNames, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetLogNames(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -282,17 +388,39 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// TailLogEntries streaming read of log entries as they are ingested. Until the stream is
|
||||
// terminated, it will continue reading logs.
|
||||
func (c *Client) TailLogEntries(ctx context.Context, opts ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.TailLogEntries[0:len(c.CallOptions.TailLogEntries):len(c.CallOptions.TailLogEntries)], opts...)
|
||||
var resp loggingpb.LoggingServiceV2_TailLogEntriesClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.TailLogEntries(ctx, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
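TailLogEntries above is new in this revision and returns a bidirectional gRPC stream. The sketch below assumes the usual generated Send/Recv stream shape and the ResourceNames field of TailLogEntriesRequest; neither appears in this diff, so treat both as assumptions.

stream, err := client.TailLogEntries(ctx)
if err != nil {
	log.Fatal(err)
}
// Tell the server which resource to tail; the field name is assumed
// from the google.logging.v2 TailLogEntriesRequest message.
if err := stream.Send(&loggingpb.TailLogEntriesRequest{
	ResourceNames: []string{"projects/my-project"}, // placeholder resource
}); err != nil {
	log.Fatal(err)
}
for {
	resp, err := stream.Recv()
	if err == io.EOF {
		break
	}
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range resp.GetEntries() {
		fmt.Println(entry.GetTextPayload())
	}
}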
// LogEntryIterator manages a stream of *loggingpb.LogEntry.
|
||||
type LogEntryIterator struct {
|
||||
items []*loggingpb.LogEntry
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
@ -335,6 +463,11 @@ type MonitoredResourceDescriptorIterator struct {
|
|||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
@ -377,6 +510,11 @@ type StringIterator struct {
|
|||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
|
152 vendor/cloud.google.com/go/logging/apiv2/metrics_client.go generated vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2019 Google LLC
|
||||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,7 +12,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by gapic-generator. DO NOT EDIT.
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package logging
|
||||
|
||||
|
@ -27,13 +27,16 @@ import (
|
|||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
loggingpb "google.golang.org/genproto/googleapis/logging/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var newMetricsClientHook clientHook
|
||||
|
||||
// MetricsCallOptions contains the retry settings for each method of MetricsClient.
|
||||
type MetricsCallOptions struct {
|
||||
ListLogMetrics []gax.CallOption
|
||||
|
@ -45,14 +48,19 @@ type MetricsCallOptions struct {
|
|||
|
||||
func defaultMetricsClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("logging.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMetricsCallOptions() *MetricsCallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
return &MetricsCallOptions{
|
||||
ListLogMetrics: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
|
@ -61,26 +69,62 @@ func defaultMetricsCallOptions() *MetricsCallOptions {
|
|||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
GetLogMetric: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
CreateLogMetric: []gax.CallOption{},
|
||||
UpdateLogMetric: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
DeleteLogMetric: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Internal,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.30,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &MetricsCallOptions{
|
||||
ListLogMetrics: retry[[2]string{"default", "idempotent"}],
|
||||
GetLogMetric: retry[[2]string{"default", "idempotent"}],
|
||||
CreateLogMetric: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateLogMetric: retry[[2]string{"default", "idempotent"}],
|
||||
DeleteLogMetric: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
// MetricsClient is a client for interacting with Stackdriver Logging API.
|
||||
// MetricsClient is a client for interacting with Cloud Logging API.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type MetricsClient struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
|
||||
disableDeadlines bool
|
||||
|
||||
// The gRPC API client.
|
||||
metricsClient loggingpb.MetricsServiceV2Client
|
||||
|
@ -96,35 +140,54 @@ type MetricsClient struct {
|
|||
//
|
||||
// Service for configuring logs-based metrics.
|
||||
func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...)
|
||||
clientOpts := defaultMetricsClientOptions()
|
||||
|
||||
if newMetricsClientHook != nil {
|
||||
hookOpts, err := newMetricsClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
disableDeadlines, err := checkDisableDeadlines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &MetricsClient{
|
||||
conn: conn,
|
||||
CallOptions: defaultMetricsCallOptions(),
|
||||
connPool: connPool,
|
||||
disableDeadlines: disableDeadlines,
|
||||
CallOptions: defaultMetricsCallOptions(),
|
||||
|
||||
metricsClient: loggingpb.NewMetricsServiceV2Client(conn),
|
||||
metricsClient: loggingpb.NewMetricsServiceV2Client(connPool),
|
||||
}
|
||||
c.SetGoogleClientInfo()
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
func (c *MetricsClient) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *MetricsClient) Close() error {
|
||||
return c.conn.Close()
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
|
||||
func (c *MetricsClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
||||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
|
@ -153,7 +216,9 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Metrics, resp.NextPageToken, nil
|
||||
|
||||
it.Response = resp
|
||||
return resp.GetMetrics(), resp.GetNextPageToken(), nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
|
@ -164,13 +229,18 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL
|
|||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
it.pageInfo.MaxSize = int(req.PageSize)
|
||||
it.pageInfo.Token = req.PageToken
|
||||
it.pageInfo.MaxSize = int(req.GetPageSize())
|
||||
it.pageInfo.Token = req.GetPageToken()
|
||||
return it
|
||||
}
|
||||
|
||||
// GetLogMetric gets a logs-based metric.
|
||||
func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...)
|
||||
|
@ -188,6 +258,11 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM
|
|||
|
||||
// CreateLogMetric creates a logs-based metric.
|
||||
func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...)
|
||||
|
@ -205,6 +280,11 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea
|
|||
|
||||
// UpdateLogMetric creates or updates a logs-based metric.
|
||||
func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...)
|
||||
|
@ -222,6 +302,11 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda
|
|||
|
||||
// DeleteLogMetric deletes a logs-based metric.
|
||||
func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...)
|
||||
|
@ -239,6 +324,11 @@ type LogMetricIterator struct {
|
|||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// Response is the raw response for the current page.
|
||||
// It must be cast to the RPC response type.
|
||||
// Calling Next() or InternalFetch() updates this value.
|
||||
Response interface{}
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
|
|
21
vendor/cloud.google.com/go/logging/doc.go
generated
vendored
|
@ -13,7 +13,7 @@
|
|||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package logging contains a Stackdriver Logging client suitable for writing logs.
|
||||
Package logging contains a Cloud Logging client suitable for writing logs.
|
||||
For reading logs, and working with sinks, metrics and monitored resources,
|
||||
see package cloud.google.com/go/logging/logadmin.
|
||||
|
||||
|
@ -23,7 +23,7 @@ See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API
|
|||
|
||||
Creating a Client
|
||||
|
||||
Use a Client to interact with the Stackdriver Logging API.
|
||||
Use a Client to interact with the Cloud Logging API.
|
||||
|
||||
// Create a Client
|
||||
ctx := context.Background()
|
||||
|
@ -36,7 +36,7 @@ Use a Client to interact with the Stackdriver Logging API.
|
|||
Basic Usage
|
||||
|
||||
For most use cases, you'll want to add log entries to a buffer to be periodically
|
||||
flushed (automatically and asynchronously) to the Stackdriver Logging service.
|
||||
flushed (automatically and asynchronously) to the Cloud Logging service.
|
||||
|
||||
// Initialize a logger
|
||||
lg := client.Logger("my-log")
|
||||
|
@ -47,7 +47,7 @@ flushed (automatically and asynchronously) to the Stackdriver Logging service.
|
|||
|
||||
Closing your Client
|
||||
|
||||
You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service.
|
||||
You should call Client.Close before your program exits to flush any buffered log entries to the Cloud Logging service.
|
||||
|
||||
// Close the client when finished.
|
||||
err = client.Close()
|
||||
|
@ -106,7 +106,7 @@ An Entry may have one of a number of severity levels associated with it.
|
|||
|
||||
Viewing Logs
|
||||
|
||||
You can view Stackdriver logs for projects at
|
||||
You can view Cloud logs for projects at
|
||||
https://console.cloud.google.com/logs/viewer. Use the dropdown at the top left. When
|
||||
running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, select
|
||||
"Google Project" and then the project ID. Logs for organizations, folders and billing
|
||||
|
@ -117,13 +117,14 @@ Grouping Logs by Request
|
|||
|
||||
To group all the log entries written during a single HTTP request, create two
|
||||
Loggers, a "parent" and a "child," with different log IDs. Both should be in the same
|
||||
project, and have the same MonitoredResouce type and labels.
|
||||
project, and have the same MonitoredResource type and labels.
|
||||
|
||||
- Parent entries must have HTTPRequest.Request populated. (Strictly speaking, only the URL is necessary.)
|
||||
- Parent entries must have HTTPRequest.Request (strictly speaking, only Method and URL are necessary),
|
||||
and HTTPRequest.Status populated.
|
||||
|
||||
- A child entry's timestamp must be within the time interval covered by the parent request (i.e., older
|
||||
than parent.Timestamp, and newer than parent.Timestamp - parent.HTTPRequest.Latency, assuming the
|
||||
parent timestamp marks the end of the request.
|
||||
- A child entry's timestamp must be within the time interval covered by the parent request. (i.e., before
|
||||
the parent.Timestamp and after the parent.Timestamp - parent.HTTPRequest.Latency. This assumes the
|
||||
parent.Timestamp marks the end of the request.)
|
||||
|
||||
- The trace field must be populated in all of the entries and match exactly.
|
||||
|
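As a concrete illustration of the parent/child pattern described above, the following is a minimal, hypothetical sketch; it assumes a `*logging.Client` has been created successfully, and the project ID, log IDs, URL and trace value are placeholders rather than anything required by the library.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/logging"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Parent and child loggers: same project, same MonitoredResource type and labels.
	parentLog := client.Logger("request_log")
	childLog := client.Logger("request_log_entries")

	// The trace value must be identical in every entry of the group.
	trace := "projects/my-project/traces/0123456789abcdef"

	req, _ := http.NewRequest("GET", "https://example.com/some/path", nil)
	start := time.Now()

	// Child entries are written while the request is being handled.
	childLog.Log(logging.Entry{Payload: "starting work", Trace: trace})

	// The parent entry is written once the request completes; its HTTPRequest
	// (Method, URL, Status) and Latency cover the child entries' timestamps.
	parentLog.Log(logging.Entry{
		HTTPRequest: &logging.HTTPRequest{
			Request: req,
			Status:  http.StatusOK,
			Latency: time.Since(start),
		},
		Trace: trace,
	})
}
```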
||||
|
|
202
vendor/cloud.google.com/go/logging/logging.go
generated
vendored
|
@ -39,14 +39,12 @@ import (
|
|||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"cloud.google.com/go/internal/version"
|
||||
vkit "cloud.google.com/go/logging/apiv2"
|
||||
"cloud.google.com/go/logging/internal"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
structpb "github.com/golang/protobuf/ptypes/struct"
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/support/bundler"
|
||||
mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
|
@ -137,11 +135,8 @@ type Client struct {
|
|||
// By default NewClient uses WriteScope. To use a different scope, call
|
||||
// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
|
||||
func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) (*Client, error) {
|
||||
if !strings.ContainsRune(parent, '/') {
|
||||
parent = "projects/" + parent
|
||||
}
|
||||
parent = makeParent(parent)
|
||||
opts = append([]option.ClientOption{
|
||||
option.WithEndpoint(internal.ProdAddr),
|
||||
option.WithScopes(WriteScope),
|
||||
}, opts...)
|
||||
c, err := vkit.NewClient(ctx, opts...)
|
||||
|
@ -173,26 +168,27 @@ func NewClient(ctx context.Context, parent string, opts ...option.ClientOption)
|
|||
return client, nil
|
||||
}
|
||||
|
||||
var unixZeroTimestamp *tspb.Timestamp
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
func makeParent(parent string) string {
|
||||
if !strings.ContainsRune(parent, '/') {
|
||||
return "projects/" + parent
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
// Ping reports whether the client's connection to the logging service and the
|
||||
// authentication configuration are valid. To accomplish this, Ping writes a
|
||||
// log entry "ping" to a log named "ping".
|
||||
func (c *Client) Ping(ctx context.Context) error {
|
||||
unixZeroTimestamp, err := ptypes.TimestampProto(time.Unix(0, 0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ent := &logpb.LogEntry{
|
||||
Payload: &logpb.LogEntry_TextPayload{TextPayload: "ping"},
|
||||
Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
|
||||
InsertId: "ping", // necessary for the service to dedup these entries.
|
||||
}
|
||||
_, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
|
||||
_, err = c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
|
||||
LogName: internal.LogPath(c.parent, "ping"),
|
||||
Resource: monitoredResource(c.parent),
|
||||
Entries: []*logpb.LogEntry{ent},
|
||||
|
@ -244,86 +240,6 @@ type LoggerOption interface {
|
|||
set(*Logger)
|
||||
}
|
||||
|
||||
// CommonResource sets the monitored resource associated with all log entries
|
||||
// written from a Logger. If not provided, the resource is automatically
|
||||
// detected based on the running environment. This value can be overridden
|
||||
// per-entry by setting an Entry's Resource field.
|
||||
func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
|
||||
|
||||
type commonResource struct{ *mrpb.MonitoredResource }
|
||||
|
||||
func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
|
||||
|
||||
var detectedResource struct {
|
||||
pb *mrpb.MonitoredResource
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
func detectResource() *mrpb.MonitoredResource {
|
||||
detectedResource.once.Do(func() {
|
||||
if !metadata.OnGCE() {
|
||||
return
|
||||
}
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
id, err := metadata.InstanceID()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
name, err := metadata.InstanceName()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
detectedResource.pb = &mrpb.MonitoredResource{
|
||||
Type: "gce_instance",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
"instance_id": id,
|
||||
"instance_name": name,
|
||||
"zone": zone,
|
||||
},
|
||||
}
|
||||
})
|
||||
return detectedResource.pb
|
||||
}
|
||||
|
||||
var resourceInfo = map[string]struct{ rtype, label string }{
|
||||
"organizations": {"organization", "organization_id"},
|
||||
"folders": {"folder", "folder_id"},
|
||||
"projects": {"project", "project_id"},
|
||||
"billingAccounts": {"billing_account", "account_id"},
|
||||
}
|
||||
|
||||
func monitoredResource(parent string) *mrpb.MonitoredResource {
|
||||
parts := strings.SplitN(parent, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return globalResource(parent)
|
||||
}
|
||||
info, ok := resourceInfo[parts[0]]
|
||||
if !ok {
|
||||
return globalResource(parts[1])
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: info.rtype,
|
||||
Labels: map[string]string{info.label: parts[1]},
|
||||
}
|
||||
}
|
||||
|
||||
func globalResource(projectID string) *mrpb.MonitoredResource {
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "global",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CommonLabels are labels that apply to all log entries written from a Logger,
|
||||
// so that you don't have to repeat them in each log entry's Labels field. If
|
||||
// any of the log entries contains a (key, value) with the same key that is in
|
||||
|
@ -544,6 +460,17 @@ func (v Severity) String() string {
|
|||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns a string representation of severity into the type
|
||||
// Severity.
|
||||
func (v *Severity) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = ParseSeverity(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseSeverity returns the Severity whose name equals s, ignoring case. It
|
||||
// returns Default if no Severity matches.
|
||||
func ParseSeverity(s string) Severity {
|
||||
|
@ -654,14 +581,20 @@ type HTTPRequest struct {
|
|||
// validated with the origin server before being served from cache. This
|
||||
// field is only meaningful if CacheHit is true.
|
||||
CacheValidatedWithOriginServer bool
|
||||
|
||||
// CacheFillBytes is the number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.
|
||||
CacheFillBytes int64
|
||||
|
||||
// CacheLookup tells whether or not a cache lookup was attempted.
|
||||
CacheLookup bool
|
||||
}
|
||||
|
||||
func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
|
||||
func fromHTTPRequest(r *HTTPRequest) (*logtypepb.HttpRequest, error) {
|
||||
if r == nil {
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
if r.Request == nil {
|
||||
panic("HTTPRequest must have a non-nil Request")
|
||||
return nil, errors.New("logging: HTTPRequest must have a non-nil Request")
|
||||
}
|
||||
u := *r.Request.URL
|
||||
u.Fragment = ""
|
||||
|
@ -677,11 +610,14 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
|
|||
Referer: r.Request.Referer(),
|
||||
CacheHit: r.CacheHit,
|
||||
CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer,
|
||||
Protocol: r.Request.Proto,
|
||||
CacheFillBytes: r.CacheFillBytes,
|
||||
CacheLookup: r.CacheLookup,
|
||||
}
|
||||
if r.Latency != 0 {
|
||||
pb.Latency = ptypes.DurationProto(r.Latency)
|
||||
}
|
||||
return pb
|
||||
return pb, nil
|
||||
}
|
||||
|
||||
// fixUTF8 is a helper that fixes an invalid UTF-8 string by replacing
|
||||
|
@ -761,16 +697,15 @@ func jsonValueToStructValue(v interface{}) *structpb.Value {
|
|||
}
|
||||
return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}}
|
||||
default:
|
||||
panic(fmt.Sprintf("bad type %T for JSON value", v))
|
||||
return &structpb.Value{Kind: &structpb.Value_NullValue{}}
|
||||
}
|
||||
}
|
||||
|
||||
// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow
|
||||
// and will block, it is intended primarily for debugging or critical errors.
|
||||
// Prefer Log for most uses.
|
||||
// TODO(jba): come up with a better name (LogNow?) or eliminate.
|
||||
func (l *Logger) LogSync(ctx context.Context, e Entry) error {
|
||||
ent, err := l.toLogEntry(e)
|
||||
ent, err := toLogEntryInternal(e, l.client, l.client.parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -785,7 +720,7 @@ func (l *Logger) LogSync(ctx context.Context, e Entry) error {
|
|||
|
||||
// Log buffers the Entry for output to the logging service. It never blocks.
|
||||
func (l *Logger) Log(e Entry) {
|
||||
ent, err := l.toLogEntry(e)
|
||||
ent, err := toLogEntryInternal(e, l.client, l.client.parent)
|
||||
if err != nil {
|
||||
l.client.error(err)
|
||||
return
|
||||
|
@ -832,38 +767,55 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
|
|||
// (for example by calling SetFlags or SetPrefix).
|
||||
func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
|
||||
|
||||
var reCloudTraceContext = regexp.MustCompile(`([a-f\d]+)/([a-f\d]+);o=(\d)`)
|
||||
var reCloudTraceContext = regexp.MustCompile(
|
||||
// Matches on "TRACE_ID"
|
||||
`([a-f\d]+)?` +
|
||||
// Matches on "/SPAN_ID"
|
||||
`(?:/([a-f\d]+))?` +
|
||||
// Matches on ";0=TRACE_TRUE"
|
||||
`(?:;o=(\d))?`)
|
||||
|
||||
func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampled bool) {
|
||||
// As per the format described at https://cloud.google.com/trace/docs/troubleshooting#force-trace
|
||||
// As per the format described at https://cloud.google.com/trace/docs/setup#force-trace
|
||||
// "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=TRACE_TRUE"
|
||||
// for example:
|
||||
// "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/0;o=1"
|
||||
// "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/1;o=1"
|
||||
//
|
||||
// We expect:
|
||||
// * traceID: "105445aa7843bc8bf206b120001000"
|
||||
// * spanID: ""
|
||||
// * traceSampled: true
|
||||
matches := reCloudTraceContext.FindAllStringSubmatch(s, -1)
|
||||
if len(matches) != 1 {
|
||||
return
|
||||
}
|
||||
// * traceID (optional): "105445aa7843bc8bf206b120001000"
|
||||
// * spanID (optional): "1"
|
||||
// * traceSampled (optional): true
|
||||
matches := reCloudTraceContext.FindStringSubmatch(s)
|
||||
|
||||
sub := matches[0]
|
||||
if len(sub) != 4 {
|
||||
return
|
||||
}
|
||||
traceID, spanID, traceSampled = matches[1], matches[2], matches[3] == "1"
|
||||
|
||||
traceID, spanID = sub[1], sub[2]
|
||||
if spanID == "0" {
|
||||
spanID = ""
|
||||
}
|
||||
traceSampled = sub[3] == "1"
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
|
||||
// ToLogEntry takes an Entry structure and converts it to the LogEntry proto.
|
||||
// A parent can take any of the following forms:
|
||||
// projects/PROJECT_ID
|
||||
// folders/FOLDER_ID
|
||||
// billingAccounts/ACCOUNT_ID
|
||||
// organizations/ORG_ID
|
||||
// for backwards compatibility, a string with no '/' is also allowed and is interpreted
|
||||
// as a project ID.
|
||||
//
|
||||
// ToLogEntry is implied when users invoke Logger.Log or Logger.LogSync,
|
||||
// but its exported as a pub function here to give users additional flexibility
|
||||
// when using the library. Don't call this method manually if Logger.Log or
|
||||
// Logger.LogSync are used, it is intended to be used together with direct call
|
||||
// to WriteLogEntries method.
|
||||
func ToLogEntry(e Entry, parent string) (*logpb.LogEntry, error) {
|
||||
// We have this method to support logging agents that need a bigger flexibility.
|
||||
return toLogEntryInternal(e, nil, makeParent(parent))
|
||||
}
|
||||
|
||||
func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry, error) {
|
||||
if e.LogName != "" {
|
||||
return nil, errors.New("logging: Entry.LogName should not be set when writing")
|
||||
}
|
||||
|
@ -882,7 +834,7 @@ func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
|
|||
// https://cloud.google.com/appengine/docs/flexible/go/writing-application-logs.
|
||||
traceID, spanID, traceSampled := deconstructXCloudTraceContext(traceHeader)
|
||||
if traceID != "" {
|
||||
e.Trace = fmt.Sprintf("%s/traces/%s", l.client.parent, traceID)
|
||||
e.Trace = fmt.Sprintf("%s/traces/%s", parent, traceID)
|
||||
}
|
||||
if e.SpanID == "" {
|
||||
e.SpanID = spanID
|
||||
|
@ -894,11 +846,19 @@ func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
|
|||
e.TraceSampled = e.TraceSampled || traceSampled
|
||||
}
|
||||
}
|
||||
req, err := fromHTTPRequest(e.HTTPRequest)
|
||||
if err != nil {
|
||||
if client != nil {
|
||||
client.error(err)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ent := &logpb.LogEntry{
|
||||
Timestamp: ts,
|
||||
Severity: logtypepb.LogSeverity(e.Severity),
|
||||
InsertId: e.InsertID,
|
||||
HttpRequest: fromHTTPRequest(e.HTTPRequest),
|
||||
HttpRequest: req,
|
||||
Operation: e.Operation,
|
||||
Labels: e.Labels,
|
||||
Trace: e.Trace,
|
||||
|
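The exported ToLogEntry helper added above is documented as being intended for callers that invoke WriteLogEntries directly; a rough sketch of that combination follows, assuming the low-level `cloud.google.com/go/logging/apiv2` client and with the project ID and log name as placeholders.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/logging"
	vkit "cloud.google.com/go/logging/apiv2"
	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
	logpb "google.golang.org/genproto/googleapis/logging/v2"
)

func main() {
	ctx := context.Background()

	// Low-level generated client that exposes WriteLogEntries directly.
	client, err := vkit.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Convert a high-level Entry without going through a Logger.
	ent, err := logging.ToLogEntry(logging.Entry{Payload: "hello"}, "my-project")
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
		LogName: "projects/my-project/logs/my-log", // placeholder log name
		Resource: &mrpb.MonitoredResource{
			Type:   "global",
			Labels: map[string]string{"project_id": "my-project"},
		},
		Entries: []*logpb.LogEntry{ent},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```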
|
268
vendor/cloud.google.com/go/logging/resource.go
generated
vendored
Normal file
|
@ -0,0 +1,268 @@
|
|||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
)
|
||||
|
||||
// CommonResource sets the monitored resource associated with all log entries
|
||||
// written from a Logger. If not provided, the resource is automatically
|
||||
// detected based on the running environment (on GCE, GCR, GCF and GAE Standard only).
|
||||
// This value can be overridden per-entry by setting an Entry's Resource field.
|
||||
func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
|
||||
|
||||
type commonResource struct{ *mrpb.MonitoredResource }
|
||||
|
||||
func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
|
||||
|
||||
var detectedResource struct {
|
||||
pb *mrpb.MonitoredResource
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// isAppEngine returns true for both standard and flex
|
||||
func isAppEngine() bool {
|
||||
_, service := os.LookupEnv("GAE_SERVICE")
|
||||
_, version := os.LookupEnv("GAE_VERSION")
|
||||
_, instance := os.LookupEnv("GAE_INSTANCE")
|
||||
|
||||
return service && version && instance
|
||||
}
|
||||
|
||||
func detectAppEngineResource() *mrpb.MonitoredResource {
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if projectID == "" {
|
||||
projectID = os.Getenv("GOOGLE_CLOUD_PROJECT")
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "gae_app",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
"module_id": os.Getenv("GAE_SERVICE"),
|
||||
"version_id": os.Getenv("GAE_VERSION"),
|
||||
"instance_id": os.Getenv("GAE_INSTANCE"),
|
||||
"runtime": os.Getenv("GAE_RUNTIME"),
|
||||
"zone": zone,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func isCloudFunction() bool {
|
||||
// Reserved envvars in older function runtimes, e.g. Node.js 8, Python 3.7 and Go 1.11.
|
||||
_, name := os.LookupEnv("FUNCTION_NAME")
|
||||
_, region := os.LookupEnv("FUNCTION_REGION")
|
||||
_, entry := os.LookupEnv("ENTRY_POINT")
|
||||
|
||||
// Reserved envvars in newer function runtimes.
|
||||
_, target := os.LookupEnv("FUNCTION_TARGET")
|
||||
_, signature := os.LookupEnv("FUNCTION_SIGNATURE_TYPE")
|
||||
_, service := os.LookupEnv("K_SERVICE")
|
||||
return (name && region && entry) || (target && signature && service)
|
||||
}
|
||||
|
||||
func detectCloudFunction() *mrpb.MonitoredResource {
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// Newer functions runtimes store name in K_SERVICE.
|
||||
functionName, exists := os.LookupEnv("K_SERVICE")
|
||||
if !exists {
|
||||
functionName, _ = os.LookupEnv("FUNCTION_NAME")
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "cloud_function",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
"region": regionFromZone(zone),
|
||||
"function_name": functionName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func isCloudRun() bool {
|
||||
_, config := os.LookupEnv("K_CONFIGURATION")
|
||||
_, service := os.LookupEnv("K_SERVICE")
|
||||
_, revision := os.LookupEnv("K_REVISION")
|
||||
return config && service && revision
|
||||
}
|
||||
|
||||
func detectCloudRunResource() *mrpb.MonitoredResource {
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "cloud_run_revision",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
"location": regionFromZone(zone),
|
||||
"service_name": os.Getenv("K_SERVICE"),
|
||||
"revision_name": os.Getenv("K_REVISION"),
|
||||
"configuration_name": os.Getenv("K_CONFIGURATION"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func isKubernetesEngine() bool {
|
||||
clusterName, err := metadata.InstanceAttributeValue("cluster-name")
|
||||
// Note: InstanceAttributeValue can return "", nil
|
||||
if err != nil || clusterName == "" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func detectKubernetesResource() *mrpb.MonitoredResource {
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
clusterName, err := metadata.InstanceAttributeValue("cluster-name")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
namespaceBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
namespaceName := ""
|
||||
if err == nil {
|
||||
namespaceName = string(namespaceBytes)
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "k8s_container",
|
||||
Labels: map[string]string{
|
||||
"cluster_name": clusterName,
|
||||
"location": zone,
|
||||
"project_id": projectID,
|
||||
"pod_name": os.Getenv("HOSTNAME"),
|
||||
"namespace_name": namespaceName,
|
||||
// To get the `container_name` label, users need to explicitly provide it.
|
||||
"container_name": os.Getenv("CONTAINER_NAME"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func detectGCEResource() *mrpb.MonitoredResource {
|
||||
projectID, err := metadata.ProjectID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
id, err := metadata.InstanceID()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
zone, err := metadata.Zone()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
name, err := metadata.InstanceName()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "gce_instance",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
"instance_id": id,
|
||||
"instance_name": name,
|
||||
"zone": zone,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func detectResource() *mrpb.MonitoredResource {
|
||||
detectedResource.once.Do(func() {
|
||||
switch {
|
||||
// AppEngine, Functions, CloudRun, Kubernetes are detected first,
|
||||
// as metadata.OnGCE() erroneously returns true on these runtimes.
|
||||
case isAppEngine():
|
||||
detectedResource.pb = detectAppEngineResource()
|
||||
case isCloudFunction():
|
||||
detectedResource.pb = detectCloudFunction()
|
||||
case isCloudRun():
|
||||
detectedResource.pb = detectCloudRunResource()
|
||||
case isKubernetesEngine():
|
||||
detectedResource.pb = detectKubernetesResource()
|
||||
case metadata.OnGCE():
|
||||
detectedResource.pb = detectGCEResource()
|
||||
}
|
||||
})
|
||||
return detectedResource.pb
|
||||
}
|
||||
|
||||
var resourceInfo = map[string]struct{ rtype, label string }{
|
||||
"organizations": {"organization", "organization_id"},
|
||||
"folders": {"folder", "folder_id"},
|
||||
"projects": {"project", "project_id"},
|
||||
"billingAccounts": {"billing_account", "account_id"},
|
||||
}
|
||||
|
||||
func monitoredResource(parent string) *mrpb.MonitoredResource {
|
||||
parts := strings.SplitN(parent, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return globalResource(parent)
|
||||
}
|
||||
info, ok := resourceInfo[parts[0]]
|
||||
if !ok {
|
||||
return globalResource(parts[1])
|
||||
}
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: info.rtype,
|
||||
Labels: map[string]string{info.label: parts[1]},
|
||||
}
|
||||
}
|
||||
|
||||
func regionFromZone(zone string) string {
|
||||
cutoff := strings.LastIndex(zone, "-")
|
||||
if cutoff > 0 {
|
||||
return zone[:cutoff]
|
||||
}
|
||||
return zone
|
||||
}
|
||||
|
||||
func globalResource(projectID string) *mrpb.MonitoredResource {
|
||||
return &mrpb.MonitoredResource{
|
||||
Type: "global",
|
||||
Labels: map[string]string{
|
||||
"project_id": projectID,
|
||||
},
|
||||
}
|
||||
}
|
236
vendor/cloud.google.com/go/testing.md
generated
vendored
Normal file
|
@ -0,0 +1,236 @@
|
|||
# Testing Code that depends on Go Client Libraries
|
||||
|
||||
The Go client libraries generated as a part of `cloud.google.com/go` all take
|
||||
the approach of returning concrete types instead of interfaces. That way, new
|
||||
fields and methods can be added to the libraries without breaking users. This
|
||||
document will go over some patterns that can be used to test code that depends
|
||||
on the Go client libraries.
|
||||
|
||||
## Testing gRPC services using fakes
|
||||
|
||||
*Note*: You can see the full
|
||||
[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/fake).
|
||||
|
||||
The clients found in `cloud.google.com/go` are gRPC based, with a couple of
|
||||
notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
|
||||
and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
|
||||
Interactions with gRPC services can be faked by serving up your own in-memory
|
||||
server within your test. One benefit of using this approach is that you don’t
|
||||
need to define an interface in your runtime code; you can keep using
|
||||
concrete struct types. You instead define a fake server in your test code. For
|
||||
example, take a look at the following function:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
translate "cloud.google.com/go/translate/apiv3"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
|
||||
ctx := context.Background()
|
||||
log.Printf("Translating %q to %q", text, targetLang)
|
||||
req := &translatepb.TranslateTextRequest{
|
||||
Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
|
||||
TargetLanguageCode: "en-US",
|
||||
Contents: []string{text},
|
||||
}
|
||||
resp, err := client.TranslateText(ctx, req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to translate text: %v", err)
|
||||
}
|
||||
translations := resp.GetTranslations()
|
||||
if len(translations) != 1 {
|
||||
return "", fmt.Errorf("expected only one result, got %d", len(translations))
|
||||
}
|
||||
return translations[0].TranslatedText, nil
|
||||
}
|
||||
```
|
||||
|
||||
Here is an example of what a fake server implementation would look like for
|
||||
faking the interactions above:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
type fakeTranslationServer struct {
|
||||
translatepb.UnimplementedTranslationServiceServer
|
||||
}
|
||||
|
||||
func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
|
||||
resp := &translatepb.TranslateTextResponse{
|
||||
Translations: []*translatepb.Translation{
|
||||
&translatepb.Translation{
|
||||
TranslatedText: "Hello World",
|
||||
},
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
```
|
||||
|
||||
All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
|
||||
contains a similar `package.UnimplementedFooServer` type that is useful for
|
||||
creating fakes. By embedding the unimplemented server in the
|
||||
`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
|
||||
exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
|
||||
method you can “override” the default unimplemented behavior of the one RPC that
|
||||
you would like to be faked.
|
||||
|
||||
The test itself does require a little bit of setup: start up a `net.Listener`,
|
||||
register the server, and tell the client library to call the server:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
translate "cloud.google.com/go/translate/apiv3"
|
||||
"google.golang.org/api/option"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestTranslateTextWithConcreteClient(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Setup the fake server.
|
||||
fakeTranslationServer := &fakeTranslationServer{}
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gsrv := grpc.NewServer()
|
||||
translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
|
||||
fakeServerAddr := l.Addr().String()
|
||||
go func() {
|
||||
if err := gsrv.Serve(l); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a client.
|
||||
client, err := translate.NewTranslationClient(ctx,
|
||||
option.WithEndpoint(fakeServerAddr),
|
||||
option.WithoutAuthentication(),
|
||||
option.WithGRPCDialOption(grpc.WithInsecure()),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Run the test.
|
||||
text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if text != "Hello World" {
|
||||
t.Fatalf("got %q, want Hello World", text)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Testing using mocks
|
||||
|
||||
*Note*: You can see the full
|
||||
[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/mock).
|
||||
|
||||
When mocking code you need to work with interfaces. Let’s create an interface
|
||||
for the `cloud.google.com/go/translate/apiv3` client used in the
|
||||
`TranslateTextWithConcreteClient` function mentioned in the previous section.
|
||||
The `translate.Client` has over a dozen methods but this code only uses one of
|
||||
them. Here is an interface that satisfies the interactions of the
|
||||
`translate.Client` in this function.
|
||||
|
||||
```go
|
||||
type TranslationClient interface {
|
||||
TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
|
||||
}
|
||||
```
|
||||
|
||||
Now that we have an interface that satisfies the method being used we can
|
||||
rewrite the function signature to take the interface instead of the concrete
|
||||
type.
|
||||
|
||||
```go
|
||||
func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
This allows a real `translate.Client` to be passed to the method in production
|
||||
and for a mock implementation to be passed in during testing. This pattern can
|
||||
be applied to any Go code, not just `cloud.google.com/go`. This is because
|
||||
interfaces in Go are implicitly satisfied. Structs in the client libraries can
|
||||
implicitly implement interfaces defined in your codebase. Let’s take a look at
|
||||
what it might look like to define a lightweight mock for the `TranslationClient`
|
||||
interface.
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
type mockClient struct{}
|
||||
|
||||
func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
|
||||
resp := &translatepb.TranslateTextResponse{
|
||||
Translations: []*translatepb.Translation{
|
||||
&translatepb.Translation{
|
||||
TranslatedText: "Hello World",
|
||||
},
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func TestTranslateTextWithAbstractClient(t *testing.T) {
|
||||
client := &mockClient{}
|
||||
text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if text != "Hello World" {
|
||||
t.Fatalf("got %q, want Hello World", text)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you prefer to not write your own mocks there are mocking frameworks such as
|
||||
[golang/mock](https://github.com/golang/mock) which can generate mocks for you
|
||||
from an interface. As a word of caution though, try to not
|
||||
[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
|
||||
|
||||
## Testing using emulators
|
||||
|
||||
Some of the client libraries provided in `cloud.google.com/go` support running
|
||||
against a service emulator. The concept is similar to that of using fakes,
|
||||
mentioned above, but the server is managed for you. You just need to start it up
|
||||
and instruct the client library to talk to the emulator by setting a service
|
||||
specific emulator environment variable. Current services/environment-variables
|
||||
are:
|
||||
|
||||
- bigtable: `BIGTABLE_EMULATOR_HOST`
|
||||
- datastore: `DATASTORE_EMULATOR_HOST`
|
||||
- firestore: `FIRESTORE_EMULATOR_HOST`
|
||||
- pubsub: `PUBSUB_EMULATOR_HOST`
|
||||
- spanner: `SPANNER_EMULATOR_HOST`
|
||||
- storage: `STORAGE_EMULATOR_HOST`
|
||||
- Although the storage client supports an emulator environment variable, there is no official emulator provided by gcloud.
|
||||
|
||||
For more information on emulators please refer to the
|
||||
[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
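As an example of the emulator approach, a test against the Pub/Sub emulator might look roughly like the sketch below. It assumes the emulator is already running on `localhost:8085` (started separately, for instance with `gcloud beta emulators pubsub start --host-port=localhost:8085`); the project ID is arbitrary because the emulator does not validate it.

```go
import (
	"context"
	"os"
	"testing"

	"cloud.google.com/go/pubsub"
)

func TestCreateTopicWithEmulator(t *testing.T) {
	// Point the client library at the locally running emulator.
	os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8085")
	defer os.Unsetenv("PUBSUB_EMULATOR_HOST")

	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "fake-project-id") // any project ID works against the emulator
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	topic, err := client.CreateTopic(ctx, "test-topic")
	if err != nil {
		t.Fatal(err)
	}
	if got, want := topic.ID(), "test-topic"; got != want {
		t.Fatalf("got topic %q, want %q", got, want)
	}
}
```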
|
|
@ -1,4 +1,4 @@
|
|||
Apache License
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
|
@ -199,4 +199,3 @@ Apache License
|
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
6280
vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
2
vendor/github.com/containerd/containerd/.golangci.yml
generated
vendored
|
@ -19,7 +19,7 @@ issues:
|
|||
- EXC0002
|
||||
|
||||
run:
|
||||
timeout: 3m
|
||||
timeout: 8m
|
||||
skip-dirs:
|
||||
- api
|
||||
- design
|
||||
|
|
16
vendor/github.com/containerd/containerd/.mailmap
generated
vendored
|
@ -29,13 +29,17 @@ Eric Ernst <eric@amperecomputing.com> <eric.ernst@intel.com>
|
|||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen@linux.alibaba.com>
|
||||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-linux.com>
|
||||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-inc.com>
|
||||
Fabiano Fidêncio <fidencio@redhat.com> <fabiano.fidencio@intel.com>
|
||||
Fahed Dorgaa <fahed.dorgaa@gmail.com>
|
||||
Frank Yang <yyb196@gmail.com>
|
||||
Fupan Li <lifupan@gmail.com>
|
||||
Fupan Li <lifupan@gmail.com> <fupan.lfp@antfin.com>
|
||||
Fupan Li <lifupan@gmail.com> <fupan.lfp@antgroup.com>
|
||||
Furkan Türkal <furkan.turkal@trendyol.com>
|
||||
Georgia Panoutsakopoulou <gpanoutsak@gmail.com>
|
||||
Guangming Wang <guangming.wang@daocloud.io>
|
||||
Haiyan Meng <haiyanmeng@google.com>
|
||||
haoyun <yun.hao@daocloud.io>
|
||||
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com> <hushuaiia@qq.com>
|
||||
|
@ -53,15 +57,18 @@ John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
|||
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
||||
Lorenz Brun <lorenz@brun.one> <lorenz@nexantic.com>
|
||||
Luc Perkins <lucperkins@gmail.com>
|
||||
Jiajun Jiang <levinxo@gmail.com>
|
||||
Julien Balestra <julien.balestra@datadoghq.com>
|
||||
Jun Lin Chen <webmaster@mc256.com> <1913688+mc256@users.noreply.github.com>
|
||||
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
|
||||
Justin Terry <juterry@microsoft.com>
|
||||
Justin Terry <juterry@microsoft.com> <jterry75@users.noreply.github.com>
|
||||
Kante <kerthcet@gmail.com>
|
||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Parsons <kevpar@microsoft.com> <kevpar@users.noreply.github.com>
|
||||
Kevin Xu <cming.xu@gmail.com>
|
||||
Kitt Hsu <kitt.hsu@gmail.com>
|
||||
Kohei Tokunaga <ktokunaga.mail@gmail.com>
|
||||
Krasi Georgiev <krasi.root@gmail.com> <krasi@vip-consult.solutions>
|
||||
Lantao Liu <lantaol@google.com>
|
||||
|
@ -69,6 +76,7 @@ Lantao Liu <lantaol@google.com> <taotaotheripper@gmail.com>
|
|||
Li Yuxuan <liyuxuan04@baidu.com> <darfux@163.com>
|
||||
Lifubang <lifubang@aliyun.com> <lifubang@acmcoder.com>
|
||||
Lu Jingxiao <lujingxiao@huawei.com>
|
||||
Maksym Pavlenko <pavlenko.maksym@gmail.com> <865334+mxpv@users.noreply.github.com>
|
||||
Maksym Pavlenko <pavlenko.maksym@gmail.com> <makpav@amazon.com>
|
||||
Maksym Pavlenko <pavlenko.maksym@gmail.com> <mxpv@apple.com>
|
||||
Mario Hros <spam@k3a.me>
|
||||
|
@ -79,6 +87,9 @@ Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
|
|||
Michael Katsoulis <michaelkatsoulis88@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
|
||||
Mohammad Asif Siddiqui <mohammad.asif.siddiqui1@huawei.com>
|
||||
Ng Yang <wssccc@qq.com>
|
||||
Ning Li <ning.a.li@transwarp.io>
|
||||
ningmingxiao <ning.mingxiao@zte.com.cn>
|
||||
Nishchay Kumar <mrawesomenix@gmail.com>
|
||||
Oliver Stenbom <oliver@stenbom.eu> <ostenbom@pivotal.io>
|
||||
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
|
||||
|
@ -104,16 +115,20 @@ Stephen J Day <stevvooe@gmail.com> <stephen.day@docker.com>
|
|||
Sudeesh John <sudeesh@linux.vnet.ibm.com>
|
||||
Su Fei <fesu@ebay.com> <fesu@ebay.com>
|
||||
Su Xiaolin <linxxnil@126.com>
|
||||
Takumasa Sakao <sakataku7@gmail.com> <tsakao@zlab.co.jp>
|
||||
Ted Yu <yuzhihong@gmail.com>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com> <weidonglee29@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com> <21621232@zju.edu.cn>
|
||||
wanglei <wllenyj@linux.alibaba.com>
|
||||
wanglei <wllenyj@linux.alibaba.com> <wanglei01@alibaba-inc.com>
|
||||
wangzhan <wang.zhan@smartx.com>
|
||||
Wei Fu <fuweid89@gmail.com>
|
||||
Wei Fu <fuweid89@gmail.com> <fhfuwei@163.com>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xuean Yan <yan.xuean@zte.com.cn>
|
||||
Yang Yang <yang8518296@163.com>
|
||||
Yue Zhang <zy675793960@yeah.net>
|
||||
Yuxing Liu <starnop@163.com>
|
||||
Zhang Wei <zhangwei555@huawei.com>
|
||||
|
@ -124,4 +139,5 @@ Zhiyu Li <payall4u@qq.com> <404977848@qq.com>
|
|||
Zhongming Chang<zhongming.chang@daocloud.io>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io> <42261994+JoeWrightss@users.noreply.github.com>
|
||||
zounengren <zouyee1989@gmail.com> <zounengren@cmss.chinamobile.com>
|
||||
张潇 <xiaozhang0210@hotmail.com>
|
||||
|
|
14
vendor/github.com/containerd/containerd/ADOPTERS.md
generated
vendored
|
@ -12,10 +12,14 @@ including the Balena project listed below.
|
|||
|
||||
**_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release.
|
||||
|
||||
**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - offers containerd as the CRI runtime in **beta** for recent versions of Kubernetes.
|
||||
**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. [More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd)
|
||||
|
||||
**_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services.
|
||||
|
||||
**_[Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/)_** - EKS optionally offers containerd as a CRI runtime starting with Kubernetes version 1.21. In Kubernetes 1.22 the default CRI runtime will be containerd.
|
||||
|
||||
**_[Bottlerocket](https://aws.amazon.com/bottlerocket/)_** - Bottlerocket is a Linux distribution from Amazon Web Services purpose-built for containers using containerd as the core system runtime.
|
||||
|
||||
**_Cloud Foundry_** - The [Guardian container manager](https://github.com/cloudfoundry/guardian) for CF has been using OCI runC directly with additional code from CF managing the container image and filesystem interactions, but have recently migrated to use containerd as a replacement for the extra code they had written around runC.
|
||||
|
||||
**_Alibaba's PouchContainer_** - The Alibaba [PouchContainer](https://github.com/alibaba/pouch) project uses containerd as its runtime for a cloud native offering that has unique isolation and image distribution capabilities.
|
||||
|
@ -32,7 +36,7 @@ including the Balena project listed below.
|
|||
|
||||
**_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command.
|
||||
|
||||
**_Azure acs-engine_** - Microsoft Azure's [acs-engine](https://github.com/Azure/acs-engine) open source project has customizable deployment of Kubernetes clusters, where containerd is a selectable container runtime. At some point in the future Azure's AKS service will default to use containerd as the CRI runtime for deployed Kubernetes clusters.
|
||||
**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration)
|
||||
|
||||
**_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd).
|
||||
|
||||
|
@ -42,6 +46,12 @@ including the Balena project listed below.
|
|||
|
||||
**_Inclavare Containers_** - [Inclavare Containers](https://github.com/alibaba/inclavare-containers) is an innovation of container runtime with the novel approach for launching protected containers in hardware-assisted Trusted Execution Environment (TEE) technology, aka Enclave, which can prevent the untrusted entity, such as Cloud Service Provider (CSP), from accessing the sensitive and confidential assets in use.
|
||||
|
||||
**_VMware TKG_** - [Tanzu Kubernetes Grid](https://tanzu.vmware.com/kubernetes-grid) VMware's Multicloud Kubernetes offering uses containerd as the default CRI runtime.
|
||||
|
||||
**_VMware TCE_** - [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) VMware's fully-featured, easy to manage, Kubernetes platform for learners and users. It is a freely available, community supported, and open source distribution of VMware Tanzu. It uses containerd as the default CRI runtime.
|
||||
|
||||
**_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation.
|
||||
|
||||
**_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants:
- Michael Crosby's [boss](https://github.com/crosbymichael/boss) project,
- Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project,
85
vendor/github.com/containerd/containerd/BUILDING.md
generated
vendored
|
@ -15,7 +15,7 @@ This doc includes:
|
|||
To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:
|
||||
|
||||
* Go 1.13.x or above except 1.14.x
|
||||
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
|
||||
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases))
|
||||
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.
|
||||
|
||||
## Build the development environment
|
||||
|
@ -32,9 +32,9 @@ git clone https://github.com/containerd/containerd
|
|||
|
||||
For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.11.4 release for a 64-bit Linux host:
|
||||
|
||||
```
|
||||
$ wget -c https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
|
||||
$ sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
|
||||
```sh
|
||||
wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
|
||||
sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
|
||||
```
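If you want to confirm the toolchain before regenerating any code, a quick sanity check (purely illustrative; the reported version will differ if you installed a different release) is:

```sh
# verify the compiler that the generate step will pick up from /usr/local/bin
which protoc
protoc --version   # for the release downloaded above this prints: libprotoc 3.11.4
```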
|
||||
|
||||
`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs), which means that you
|
||||
|
@ -46,38 +46,20 @@ need to satisfy these dependencies in your system:
|
|||
|
||||
At this point you are ready to build `containerd` yourself!
|
||||
|
||||
## Build runc
|
||||
## Runc
|
||||
|
||||
`runc` is the default container runtime used by `containerd` and is required to
|
||||
run containerd. While it is okay to download a runc binary and install that on
|
||||
Runc is the default container runtime used by `containerd` and is required to
|
||||
run containerd. While it is okay to download a `runc` binary and install that on
|
||||
the system, sometimes it is necessary to build runc directly when working with
|
||||
container runtime development. You can skip this step if you already have the
|
||||
correct version of `runc` installed.
|
||||
|
||||
`runc` requires `libseccomp`. You may need to install the missing dependencies:
|
||||
|
||||
* CentOS/Fedora: `yum install libseccomp libseccomp-devel`
|
||||
* Debian/Ubuntu: `apt-get install libseccomp libseccomp-dev`
|
||||
|
||||
|
||||
For the quick and dirty installation, you can use the following:
|
||||
|
||||
```
|
||||
git clone https://github.com/opencontainers/runc
|
||||
cd runc
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the
|
||||
best results.
|
||||
container runtime development. Make sure to follow the guidelines for versioning
|
||||
in [RUNC.md](/docs/RUNC.md) for the best results.
|
||||
|
||||
## Build containerd
|
||||
|
||||
`containerd` uses `make` to create a repeatable build flow. It means that you
|
||||
can run:
|
||||
|
||||
```
|
||||
```sh
|
||||
cd containerd
|
||||
make
|
||||
```
|
||||
|
@ -86,22 +68,44 @@ This is going to build all the project binaries in the `./bin/` directory.
|
|||
|
||||
You can move them to your global path, `/usr/local/bin`, with:
|
||||
|
||||
```sudo
|
||||
```sh
|
||||
sudo make install
|
||||
```
|
||||
|
||||
The install prefix can be changed by passing the `PREFIX` variable (defaults
|
||||
to `/usr/local`).
|
||||
|
||||
Note: if you set one of these vars, set them to the same values on all make stages
|
||||
(build as well as install).
|
||||
|
||||
If you want to prepend an additional prefix on actual installation (eg. packaging or chroot install),
|
||||
you can pass it via `DESTDIR` variable:
|
||||
|
||||
```sh
|
||||
sudo make install DESTDIR=/tmp/install-x973234/
|
||||
```
|
||||
|
||||
The above command installs the `containerd` binary to `/tmp/install-x973234/usr/local/bin/containerd`
|
||||
|
||||
The current `DESTDIR` convention is supported since containerd v1.6.
|
||||
Older releases used `DESTDIR` for a different purpose, similar to `PREFIX`.
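As a concrete illustration of how `PREFIX` and `DESTDIR` combine under the current convention (the paths below are arbitrary examples, not recommendations):

```sh
# stage a build under /tmp/stage, laid out as if the final install prefix were /opt/containerd;
# PREFIX is passed on both stages, per the note above about keeping variables consistent
make PREFIX=/opt/containerd
sudo make install PREFIX=/opt/containerd DESTDIR=/tmp/stage
# binaries end up in /tmp/stage/opt/containerd/bin, ready to be packaged or copied into a chroot
ls /tmp/stage/opt/containerd/bin
```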
|
||||
|
||||
|
||||
When making any changes to the gRPC API, you can use the installed `protoc`
|
||||
compiler to regenerate the API generated code packages with:
|
||||
|
||||
```sudo
|
||||
```sh
|
||||
make generate
|
||||
```
|
||||
|
||||
> *Note*: Several build tags are currently available:
|
||||
> * `no_btrfs`: A build tag disables building the btrfs snapshot driver.
|
||||
> * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd.
|
||||
> See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin.
|
||||
> * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
|
||||
> * snapshotters (alphabetical order)
|
||||
> * `no_aufs`: A build tag disables building the aufs snapshot driver.
|
||||
> * `no_btrfs`: A build tag disables building the Btrfs snapshot driver.
|
||||
> * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
|
||||
> * `no_zfs`: A build tag disables building the ZFS snapshot driver.
|
||||
>
|
||||
> For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
|
||||
> Makefile target will disable the btrfs driver within the containerd Go build.
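For instance, a build that skips both the btrfs and devmapper snapshotters could look like this (the tag combination here is just an example; the tag names are the ones listed in the note above):

```sh
# build tags are passed via the environment before invoking the binaries target
BUILDTAGS="no_btrfs no_devmapper" make binaries
```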
|
||||
|
@ -117,7 +121,7 @@ Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of
|
|||
|
||||
You can build static binaries by providing a few variables to `make`:
|
||||
|
||||
```sudo
|
||||
```sh
|
||||
make EXTRA_FLAGS="-buildmode pie" \
|
||||
EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \
|
||||
BUILDTAGS="netgo osusergo static_build"
|
||||
|
@ -131,12 +135,12 @@ make EXTRA_FLAGS="-buildmode pie" \
|
|||
|
||||
The following instructions assume you are at the parent directory of containerd source directory.
|
||||
|
||||
## Build containerd
|
||||
## Build containerd in a container
|
||||
|
||||
You can build `containerd` via a Linux-based Docker container.
|
||||
You can build an image from this `Dockerfile`:
|
||||
|
||||
```
|
||||
```dockerfile
|
||||
FROM golang
|
||||
|
||||
RUN apt-get update && \
|
||||
|
@ -158,10 +162,11 @@ This mounts `containerd` repository
|
|||
You are now ready to [build](#build-containerd):
|
||||
|
||||
```sh
|
||||
make && make install
|
||||
make && make install
|
||||
```
|
||||
|
||||
## Build containerd and runc
|
||||
## Build containerd and runc in a container
|
||||
|
||||
To have complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container.
|
||||
|
||||
You can use `git` to checkout `runc`:
|
||||
|
@ -177,7 +182,6 @@ FROM golang
|
|||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y libbtrfs-dev libseccomp-dev
|
||||
|
||||
```
|
||||
|
||||
In our Docker container we will build `runc`, which includes
|
||||
|
@ -246,6 +250,7 @@ go test -v -run . -test.root
|
|||
```
|
||||
|
||||
Example output from directly running `go test` to execute the `TestContainerList` test:
|
||||
|
||||
```sh
|
||||
sudo go test -v -run "TestContainerList" . -test.root
|
||||
INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0
|
||||
|
@ -255,6 +260,10 @@ PASS
|
|||
ok github.com/containerd/containerd 4.778s
|
||||
```
|
||||
|
||||
> *Note*: in order to run `sudo go` you need to
|
||||
> - either keep user PATH environment variable. ex: `sudo "PATH=$PATH" env go test <args>`
|
||||
> - or use `go test -exec` ex: `go test -exec sudo -v -run "TestTarWithXattr" ./archive/ -test.root`
|
||||
|
||||
## Additional tools
|
||||
|
||||
### containerd-stress
|
||||
|
|
93
vendor/github.com/containerd/containerd/Makefile
generated
vendored
|
@ -15,16 +15,22 @@
|
|||
|
||||
# Go command to use for build
|
||||
GO ?= go
|
||||
INSTALL ?= install
|
||||
|
||||
# Root directory of the project (absolute path).
|
||||
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
|
||||
# Base path used to install.
|
||||
DESTDIR ?= /usr/local
|
||||
# The files will be installed under `$(DESTDIR)/$(PREFIX)`.
|
||||
# The convention of `DESTDIR` was changed in containerd v1.6.
|
||||
PREFIX ?= /usr/local
|
||||
DATADIR ?= $(PREFIX)/share
|
||||
MANDIR ?= $(DATADIR)/man
|
||||
|
||||
TEST_IMAGE_LIST ?=
|
||||
|
||||
# Used to populate variables in version package.
|
||||
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
||||
VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
||||
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
|
||||
PACKAGE=github.com/containerd/containerd
|
||||
SHIM_CGO_ENABLED ?= 0
|
||||
|
@ -68,7 +74,7 @@ endif
|
|||
WHALE = "🇩"
|
||||
ONI = "👹"
|
||||
|
||||
RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
|
||||
RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
|
||||
CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
|
||||
CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH}
|
||||
|
||||
|
@ -83,12 +89,14 @@ ifdef BUILDTAGS
|
|||
endif
|
||||
GO_BUILDTAGS ?=
|
||||
GO_BUILDTAGS += ${DEBUG_TAGS}
|
||||
GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(GO_BUILDTAGS)",)
|
||||
GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",)
|
||||
GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)'
|
||||
SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)'
|
||||
|
||||
# Project packages.
|
||||
PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration)
|
||||
API_PACKAGES=$(shell (cd api && $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration))
|
||||
NON_API_PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration | grep -v "containerd/api")
|
||||
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
|
||||
${PACKAGES}, \
|
||||
$(shell \
|
||||
|
@ -133,6 +141,9 @@ CRIDIR=$(OUTPUTDIR)/cri
|
|||
.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test
|
||||
.DEFAULT: default
|
||||
|
||||
# Forcibly set the default goal to all, in case an include above brought in a rule definition.
|
||||
.DEFAULT_GOAL := all
|
||||
|
||||
all: binaries
|
||||
|
||||
check: proto-fmt ## run all linters
|
||||
|
@ -150,7 +161,13 @@ generate: protos
|
|||
|
||||
protos: bin/protoc-gen-gogoctrd ## generate protobuf
|
||||
@echo "$(WHALE) $@"
|
||||
@PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}
|
||||
@find . -path ./vendor -prune -false -o -name '*.pb.go' | xargs rm
|
||||
$(eval TMPDIR := $(shell mktemp -d))
|
||||
@mv ${ROOTDIR}/vendor ${TMPDIR}
|
||||
@(cd ${ROOTDIR}/api && PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${API_PACKAGES})
|
||||
@(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES})
|
||||
@mv ${TMPDIR}/vendor ${ROOTDIR}
|
||||
@rm -rf ${TMPDIR}
|
||||
|
||||
check-protos: protos ## check if protobufs needs to be generated again
|
||||
@echo "$(WHALE) $@"
|
||||
|
@ -194,7 +211,7 @@ bin/cri-integration.test:
|
|||
|
||||
cri-integration: binaries bin/cri-integration.test ## run cri integration tests
|
||||
@echo "$(WHALE) $@"
|
||||
@./script/test/cri-integration.sh
|
||||
@bash -x ./script/test/cri-integration.sh
|
||||
@rm -rf bin/cri-integration.test
|
||||
|
||||
benchmark: ## run benchmarks tests
|
||||
|
@ -213,16 +230,16 @@ bin/%: cmd/% FORCE
|
|||
$(call BUILD_BINARY)
|
||||
|
||||
bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
|
||||
@echo "$(WHALE) $@"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
|
||||
|
||||
bin/containerd-shim-runc-v1: cmd/containerd-shim-runc-v1 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim-runc-v1"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
|
||||
@echo "$(WHALE) $@"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
|
||||
|
||||
bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim-runc-v2"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v2 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
|
||||
@echo "$(WHALE) $@"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
|
||||
|
||||
binaries: $(BINARIES) ## build binaries
|
||||
@echo "$(WHALE) $@"
|
||||
|
@ -238,30 +255,31 @@ genman: man/containerd.8 man/ctr.8
|
|||
|
||||
man/containerd.8: FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
|
||||
man/ctr.8: FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
|
||||
man/%: docs/man/%.md FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
go-md2man -in "$<" -out "$@"
|
||||
|
||||
define installmanpage
|
||||
mkdir -p $(DESTDIR)/man/man$(2);
|
||||
gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz;
|
||||
$(INSTALL) -d $(DESTDIR)$(MANDIR)/man$(2);
|
||||
gzip -c $(1) >$(DESTDIR)$(MANDIR)/man$(2)/$(3).gz;
|
||||
endef
|
||||
|
||||
install-man:
|
||||
install-man: man
|
||||
@echo "$(WHALE) $@"
|
||||
$(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage))))
|
||||
|
||||
|
||||
releases/$(RELEASE).tar.gz: $(BINARIES)
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
|
||||
@install -d releases/$(RELEASE)/bin
|
||||
@install $(BINARIES) releases/$(RELEASE)/bin
|
||||
@$(INSTALL) -d releases/$(RELEASE)/bin
|
||||
@$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin
|
||||
@tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin
|
||||
@rm -rf releases/$(RELEASE)
|
||||
|
||||
|
@ -272,18 +290,18 @@ release: releases/$(RELEASE).tar.gz
|
|||
# install of cri deps into release output directory
|
||||
ifeq ($(GOOS),windows)
|
||||
install-cri-deps: $(BINARIES)
|
||||
mkdir -p $(CRIDIR)
|
||||
$(INSTALL) -d $(CRIDIR)
|
||||
DESTDIR=$(CRIDIR) script/setup/install-cni-windows
|
||||
cp bin/* $(CRIDIR)
|
||||
else
|
||||
install-cri-deps: $(BINARIES)
|
||||
@rm -rf ${CRIDIR}
|
||||
@install -d ${CRIDIR}/usr/local/bin
|
||||
@install -D -m 755 bin/* ${CRIDIR}/usr/local/bin
|
||||
@install -d ${CRIDIR}/opt/containerd/cluster
|
||||
@$(INSTALL) -d ${CRIDIR}/usr/local/bin
|
||||
@$(INSTALL) -D -m 755 bin/* ${CRIDIR}/usr/local/bin
|
||||
@$(INSTALL) -d ${CRIDIR}/opt/containerd/cluster
|
||||
@cp -r contrib/gce ${CRIDIR}/opt/containerd/cluster/
|
||||
@install -d ${CRIDIR}/etc/systemd/system
|
||||
@install -m 644 containerd.service ${CRIDIR}/etc/systemd/system
|
||||
@$(INSTALL) -d ${CRIDIR}/etc/systemd/system
|
||||
@$(INSTALL) -m 644 containerd.service ${CRIDIR}/etc/systemd/system
|
||||
echo "CONTAINERD_VERSION: '$(VERSION:v%=%)'" | tee ${CRIDIR}/opt/containerd/cluster/version
|
||||
|
||||
DESTDIR=$(CRIDIR) script/setup/install-runc
|
||||
|
@ -291,8 +309,8 @@ install-cri-deps: $(BINARIES)
|
|||
DESTDIR=$(CRIDIR) script/setup/install-critools
|
||||
DESTDIR=$(CRIDIR) script/setup/install-imgcrypt
|
||||
|
||||
@install -d $(CRIDIR)/bin
|
||||
@install $(BINARIES) $(CRIDIR)/bin
|
||||
@$(INSTALL) -d $(CRIDIR)/bin
|
||||
@$(INSTALL) $(BINARIES) $(CRIDIR)/bin
|
||||
endif
|
||||
|
||||
ifeq ($(GOOS),windows)
|
||||
|
@ -345,12 +363,12 @@ clean-test: ## clean up debris from previously failed tests
|
|||
|
||||
install: ## install binaries
|
||||
@echo "$(WHALE) $@ $(BINARIES)"
|
||||
@mkdir -p $(DESTDIR)/bin
|
||||
@install $(BINARIES) $(DESTDIR)/bin
|
||||
@$(INSTALL) -d $(DESTDIR)$(PREFIX)/bin
|
||||
@$(INSTALL) $(BINARIES) $(DESTDIR)$(PREFIX)/bin
|
||||
|
||||
uninstall:
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
|
||||
@rm -f $(addprefix $(DESTDIR)$(PREFIX)/bin/,$(notdir $(BINARIES)))
|
||||
|
||||
ifeq ($(GOOS),windows)
|
||||
install-deps:
|
||||
|
@ -394,10 +412,23 @@ root-coverage: ## generate coverage profiles for unit tests that require root
|
|||
fi; \
|
||||
done )
|
||||
|
||||
vendor: ## vendor
|
||||
vendor: ## ensure all the go.mod/go.sum files are up-to-date including vendor/ directory
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) mod tidy
|
||||
@$(GO) mod vendor
|
||||
@$(GO) mod verify
|
||||
@(cd ${ROOTDIR}/integration/client && ${GO} mod tidy)
|
||||
|
||||
verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date
|
||||
@echo "$(WHALE) $@"
|
||||
$(eval TMPDIR := $(shell mktemp -d))
|
||||
@cp -R ${ROOTDIR} ${TMPDIR}
|
||||
@(cd ${TMPDIR}/containerd && ${GO} mod tidy)
|
||||
@(cd ${TMPDIR}/containerd/integration/client && ${GO} mod tidy)
|
||||
@diff -r -u -q ${ROOTDIR} ${TMPDIR}/containerd
|
||||
@rm -rf ${TMPDIR}
|
||||
@${ROOTDIR}/script/verify-go-modules.sh integration/client
|
||||
|
||||
|
||||
help: ## this help
|
||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
|
||||
|
|
2
vendor/github.com/containerd/containerd/Makefile.linux
generated
vendored
|
@ -20,7 +20,7 @@ COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2
|
|||
|
||||
# check GOOS for cross compile builds
|
||||
ifeq ($(GOOS),linux)
|
||||
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
|
||||
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64))
|
||||
GO_GCFLAGS += -buildmode=pie
|
||||
endif
|
||||
endif
|
||||
|
|
6
vendor/github.com/containerd/containerd/Makefile.windows
generated
vendored
|
@ -22,7 +22,11 @@ ifeq ($(GOARCH),amd64)
|
|||
TESTFLAGS_RACE= -race
|
||||
endif
|
||||
|
||||
BINARIES:=$(addsuffix .exe,$(BINARIES))
|
||||
WINDOWS_SHIM=bin/containerd-shim-runhcs-v1.exe
|
||||
BINARIES := $(addsuffix .exe,$(BINARIES)) $(WINDOWS_SHIM)
|
||||
|
||||
$(WINDOWS_SHIM): script/setup/install-runhcs-shim go.mod
|
||||
DESTDIR=$(CURDIR)/bin $<
|
||||
|
||||
bin/%.exe: cmd/% FORCE
|
||||
$(BUILD_BINARY)
|
||||
|
|
17
vendor/github.com/containerd/containerd/Protobuild.toml
generated
vendored
|
@ -31,28 +31,11 @@ plugins = ["grpc", "fieldpath"]
|
|||
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/events"]
|
||||
plugins = ["fieldpath"] # disable grpc for this package
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"]
|
||||
plugins = ["ttrpc", "fieldpath"]
|
||||
|
||||
[[overrides]]
|
||||
# enable ttrpc and disable fieldpath and grpc for the shim
|
||||
prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
# Aggregrate the API descriptors to lock down API changes.
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/api"
|
||||
target = "api/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
||||
|
||||
# Lock down runc config
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/runtime/linux/runctypes"
|
||||
|
|
31
vendor/github.com/containerd/containerd/README.md
generated
vendored
|
@ -1,9 +1,9 @@
|
|||

|
||||

|
||||

|
||||
|
||||
[](https://pkg.go.dev/github.com/containerd/containerd)
|
||||
[](https://github.com/containerd/containerd/actions?query=workflow%3ACI)
|
||||
[](https://github.com/containerd/containerd/actions?query=workflow%3ANightly)
|
||||
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
|
||||
[](https://goreportcard.com/report/github.com/containerd/containerd)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/1271)
|
||||
|
||||
|
@ -21,7 +21,7 @@ We are a large inclusive OSS project that is welcoming help of any kind shape or
|
|||
* Documentation help is needed to make the product easier to consume and extend.
|
||||
* We need OSS community outreach / organizing help to get the word out; manage
|
||||
and create messaging and educational content; and to help with social media, community forums/groups, and google groups.
|
||||
* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team.
|
||||
* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team.
|
||||
* New sub-projects are being created, core and non-core that could use additional development help.
|
||||
* Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving.
|
||||
- If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire.
|
||||
|
@ -41,7 +41,7 @@ If you are interested in trying out containerd see our example at [Getting Start
|
|||
## Nightly builds
|
||||
|
||||
There are nightly builds available for download [here](https://github.com/containerd/containerd/actions?query=workflow%3ANightly).
|
||||
Binaries are generated from `master` branch every night for `Linux` and `Windows`.
|
||||
Binaries are generated from `main` branch every night for `Linux` and `Windows`.
|
||||
|
||||
Please be aware: nightly builds might have critical bugs, it's not recommended for use in production and no support provided.
|
||||
|
||||
|
@ -68,6 +68,14 @@ your system. See more details in [Checkpoint and Restore](#checkpoint-and-restor
|
|||
|
||||
Build requirements for developers are listed in [BUILDING](BUILDING.md).
|
||||
|
||||
|
||||
## Supported Registries
|
||||
|
||||
Any registry which is compliant with the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
|
||||
is supported by containerd.
|
||||
|
||||
For configuring registries, see [registry host configuration documentation](docs/hosts.md)
|
||||
|
||||
## Features
|
||||
|
||||
### Client
|
||||
|
@ -77,8 +85,11 @@ containerd offers a full client package to help you integrate containerd into yo
|
|||
```go
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
)
|
||||
|
||||
|
||||
|
@ -269,7 +280,7 @@ loaded for the user's shell environment.
|
|||
`cri` is a native plugin of containerd. Since containerd 1.1, the cri plugin is built into the release binaries and enabled by default.
|
||||
|
||||
> **Note:** As of containerd 1.5, the `cri` plugin is merged into the containerd/containerd repo. For example, the source code previously stored under [`containerd/cri/pkg`](https://github.com/containerd/cri/tree/release/1.4/pkg)
|
||||
was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/master/pkg/cri).
|
||||
was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/main/pkg/cri).
|
||||
|
||||
The `cri` plugin has reached GA status, representing that it is:
|
||||
* Feature complete
|
||||
|
@ -289,7 +300,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c
|
|||
* [CRI Plugin Testing Guide](./docs/cri/testing.md)
|
||||
* [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md)
|
||||
* [Configuring `cri` Plugins](./docs/cri/config.md)
|
||||
* [Configuring containerd](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md)
|
||||
* [Configuring containerd](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.8.md)
|
||||
|
||||
### Communication
|
||||
|
||||
|
@ -315,14 +326,14 @@ copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by
|
|||
|
||||
## Project details
|
||||
|
||||
**containerd** is the primary open source project within the broader containerd GitHub repository.
|
||||
**containerd** is the primary open source project within the broader containerd GitHub organization.
|
||||
However, all projects within the repo have common maintainership, governance, and contributing
|
||||
guidelines which are stored in a `project` repository commonly for all containerd projects.
|
||||
|
||||
Please find all these core project documents, including the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
|
||||
|
|
62
vendor/github.com/containerd/containerd/RELEASES.md
generated
vendored
|
@ -27,7 +27,7 @@ considered "pre-releases".
|
|||
|
||||
### Major and Minor Releases
|
||||
|
||||
Major and minor releases of containerd will be made from master. Releases of
|
||||
Major and minor releases of containerd will be made from main. Releases of
|
||||
containerd will be marked with GPG signed tags and announced at
|
||||
https://github.com/containerd/containerd/releases. The tag will be of the
|
||||
format `v<major>.<minor>.<patch>` and should be made with the command `git tag
|
||||
|
@ -43,7 +43,7 @@ done against that branch.
|
|||
|
||||
Pre-releases, such as alphas, betas and release candidates will be conducted
|
||||
from their source branch. For major and minor releases, these releases will be
|
||||
done from master. For patch releases, these pre-releases should be done within
|
||||
done from main. For patch releases, these pre-releases should be done within
|
||||
the corresponding release branch.
|
||||
|
||||
While pre-releases are done to assist in the stabilization process, no
|
||||
|
@ -89,7 +89,7 @@ whichever is longer. Additionally, releases may have an extended security suppor
|
|||
period after the end of the active period to accept security backports. This
|
||||
timeframe will be decided by maintainers before the end of the active status.
|
||||
|
||||
The current state is available in the following table:
|
||||
The current state is available in the following tables:
|
||||
|
||||
| Release | Status | Start | End of Life |
|
||||
|---------|-------------|------------------|-------------------|
|
||||
|
@ -100,12 +100,27 @@ The current state is available in the following table:
|
|||
| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 |
|
||||
| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 |
|
||||
| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 |
|
||||
| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.4) | Active | August 17, 2020 | max(August 17, 2021, release of 1.5.0 + 6 months) |
|
||||
| [1.5](https://github.com/containerd/containerd/milestone/30) | Next | TBD | max(TBD+1 year, release of 1.6.0 + 6 months) |
|
||||
| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended | August 17, 2020 | March 3, 2022 (Extended) |
|
||||
| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9) | Active | May 3, 2021 | October 28, 2022 |
|
||||
| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | Active | February 15, 2022 | max(February 15, 2023 or release of 1.7.0 + 6 months) |
|
||||
| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD |
|
||||
|
||||
Note that branches and releases from before 1.0 may not follow these rules.
|
||||
|
||||
This table should be updated as part of the release preparation process.
|
||||
| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version |
|
||||
|------------------------|--------------------|--------------------|--------------|
|
||||
| v1.0.0-alpha.x | | 1.7, 1.8 | v1alpha1 |
|
||||
| v1.0.0-beta.x | | 1.9 | v1alpha1 |
|
||||
| End-Of-Life | v1.1 (End-Of-Life) | 1.10+ | v1alpha2 |
|
||||
| | v1.2 (End-Of-Life) | 1.10+ | v1alpha2 |
|
||||
| | v1.3 (End-Of-Life) | 1.12+ | v1alpha2 |
|
||||
| | v1.4 | 1.19+ | v1alpha2 |
|
||||
| | v1.5 | 1.20+ | v1alpha2 |
|
||||
| | v1.6 | 1.23+ | v1, v1alpha2 |
|
||||
|
||||
**Note:** The support table above specifies the Kubernetes version that was supported at the time of release of the containerd - cri integration; Kubernetes itself only supports n-3 minor release versions.
|
||||
|
||||
These tables should be updated as part of the release preparation process.
|
||||
|
||||
### Backporting
|
||||
|
||||
|
@ -115,11 +130,11 @@ will be features for the next _minor_ or _major_ release. For the most part,
|
|||
this process is straightforward and we are here to help make it as smooth as
|
||||
possible.
|
||||
|
||||
If there are important fixes that need to be backported, please let use know in
|
||||
If there are important fixes that need to be backported, please let us know in
|
||||
one of three ways:
|
||||
|
||||
1. Open an issue.
|
||||
2. Open a PR with cherry-picked change from master.
|
||||
2. Open a PR with cherry-picked change from main.
|
||||
3. Open a PR with a ported fix.
|
||||
|
||||
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
|
||||
|
@ -127,10 +142,10 @@ Remember that backported PRs must follow the versioning guidelines from this doc
|
|||
|
||||
Any release that is "active" can accept backports. Opening a backport PR is
|
||||
fairly straightforward. The steps differ depending on whether you are pulling
|
||||
a fix from master or need to draft a new commit specific to a particular
|
||||
a fix from main or need to draft a new commit specific to a particular
|
||||
branch.
|
||||
|
||||
To cherry pick a straightforward commit from master, simply use the cherry pick
|
||||
To cherry pick a straightforward commit from main, simply use the cherry pick
|
||||
process:
|
||||
|
||||
1. Pick the branch to which you want backported, usually in the format
|
||||
|
@ -154,7 +169,7 @@ process:
|
|||
```
|
||||
|
||||
Make sure to replace `stevvooe` with whatever fork you are using to open
|
||||
the PR. When you open the PR, make sure to switch `master` with whatever
|
||||
the PR. When you open the PR, make sure to switch `main` with whatever
|
||||
release branch you are targeting with the fix. Make sure the PR title has
|
||||
`[<release branch>]` prefixed. e.g.:
|
||||
|
||||
|
@ -162,11 +177,11 @@ process:
|
|||
[release/1.4] Fix foo in bar
|
||||
```
|
||||
|
||||
If there is no existing fix in master, you should first fix the bug in master,
|
||||
If there is no existing fix in main, you should first fix the bug in main,
|
||||
or ask a maintainer or contributor to do it via an issue. Once that PR is
|
||||
completed, open a PR using the process above.
|
||||
|
||||
Only when the bug is not seen in master and must be made for the specific
|
||||
Only when the bug is not seen in main and must be made for the specific
|
||||
release branch should you open a PR with new code.
|
||||
|
||||
## Public API Stability
|
||||
|
@ -177,12 +192,12 @@ containerd versions:
|
|||
|
||||
| Component | Status | Stabilized Version | Links |
|
||||
|------------------|----------|--------------------|---------------|
|
||||
| GRPC API | Stable | 1.0 | [api/](api) |
|
||||
| GRPC API | Stable | 1.0 | [gRPC API](#grpc-api) |
|
||||
| Metrics API | Stable | 1.0 | - |
|
||||
| Runtime Shim API | Stable | 1.2 | - |
|
||||
| Daemon Config | Stable | 1.0 | - |
|
||||
| Daemon Config | Stable | 1.0 | - |
|
||||
| CRI GRPC API | Stable | 1.6 (_CRI v1_) | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1) |
|
||||
| Go client API | Unstable | _future_ | [godoc](https://godoc.org/github.com/containerd/containerd) |
|
||||
| CRI GRPC API | Unstable | v1alpha2 _current_ | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1alpha2) |
|
||||
| `ctr` tool | Unstable | Out of scope | - |
|
||||
|
||||
From the version stated in the above table, that component must adhere to the
|
||||
|
@ -201,7 +216,7 @@ version jump.
|
|||
To ensure compatibility, we have collected the entire GRPC API symbol set into
|
||||
a single file. At each _minor_ release of containerd, we will move the current
|
||||
`next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
|
||||
enumerating the support services and messages. See [api/](api) for details.
|
||||
enumerating the support services and messages.
|
||||
|
||||
Note that new services may be added in _minor_ releases. New service methods
|
||||
and new fields on messages may be added if they are optional.
|
||||
|
@ -321,9 +336,10 @@ against total impact.
|
|||
|
||||
The deprecated features are shown in the following table:
|
||||
|
||||
| Component | Deprecation release | Target release for removal | Recommendation |
|
||||
|----------------------------------------------------------------------|---------------------|----------------------------|-------------------------------|
|
||||
| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
|
||||
| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
|
||||
| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` |
|
||||
| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter |
|
||||
| Component | Deprecation release | Target release for removal | Recommendation |
|
||||
|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------|
|
||||
| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
|
||||
| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` |
|
||||
| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` |
|
||||
| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter |
|
||||
| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles |
|
||||
|
|
31
vendor/github.com/containerd/containerd/Vagrantfile
generated
vendored
|
@ -17,7 +17,7 @@
|
|||
|
||||
# Vagrantfile for cgroup2 and SELinux
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "fedora/34-cloud-base"
|
||||
config.vm.box = "fedora/35-cloud-base"
|
||||
memory = 4096
|
||||
cpus = 2
|
||||
config.vm.provider :virtualbox do |v|
|
||||
|
@ -77,7 +77,7 @@ Vagrant.configure("2") do |config|
|
|||
config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-golang"
|
||||
sh.env = {
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.16.14",
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.17.7",
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
|
@ -257,4 +257,31 @@ EOF
|
|||
SHELL
|
||||
end
|
||||
|
||||
# Rootless Podman is used for testing CRI-in-UserNS
|
||||
# (We could use rootless nerdctl, but we are using Podman here because it is available in dnf)
|
||||
config.vm.provision "install-rootless-podman", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-rootless-podman"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
# Delegate cgroup v2 controllers to rootless
|
||||
mkdir -p /etc/systemd/system/user@.service.d
|
||||
cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
|
||||
[Service]
|
||||
Delegate=yes
|
||||
EOF
|
||||
systemctl daemon-reload
|
||||
# Install Podman
|
||||
dnf install -y podman
|
||||
# Configure Podman to resolve `golang` to `docker.io/library/golang`
|
||||
mkdir -p /etc/containers
|
||||
cat > /etc/containers/registries.conf <<EOF
|
||||
[registries.search]
|
||||
registries = ['docker.io']
|
||||
EOF
|
||||
# Disable SELinux to allow overlayfs
|
||||
setenforce 0
|
||||
SHELL
|
||||
end
|
||||
|
||||
end
|
||||
|
|
17
vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containers
|
17
vendor/github.com/containerd/containerd/api/services/content/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package content
|
17
vendor/github.com/containerd/containerd/api/services/diff/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package diff
|
17
vendor/github.com/containerd/containerd/api/services/namespaces/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namespaces
|
17
vendor/github.com/containerd/containerd/api/services/snapshots/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package snapshots
|
17
vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tasks
|
219
vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
generated
vendored
|
@ -51,6 +51,7 @@ type CreateTaskRequest struct {
|
|||
Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
|
||||
Options *types1.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"`
|
||||
RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
|
@ -1169,93 +1170,95 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_310e7127b8a26f14 = []byte{
|
||||
// 1376 bytes of a gzipped FileDescriptorProto
|
||||
// 1400 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xc3, 0xf2, 0x62, 0x02,
|
||||
0x5d, 0x53, 0x17, 0x55, 0x55, 0x5b, 0x55, 0xe4, 0x46, 0x64, 0x41, 0xd5, 0x74, 0x5b, 0xa0, 0xaa,
|
||||
0x84, 0xc2, 0xc6, 0x3b, 0x71, 0x46, 0xb1, 0x77, 0xb6, 0x3b, 0xe3, 0xb4, 0xe6, 0x05, 0x7e, 0x42,
|
||||
0x5f, 0x79, 0x81, 0xbf, 0x93, 0x47, 0x1e, 0x11, 0xaa, 0x02, 0xf5, 0xbf, 0xe0, 0x0d, 0xcd, 0x65,
|
||||
0xd7, 0x1b, 0x3b, 0xf6, 0x3a, 0x4d, 0xc3, 0x4b, 0x32, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf, 0xb9,
|
||||
0x7d, 0x09, 0xac, 0xb5, 0x30, 0xdb, 0xef, 0xee, 0x5a, 0x4d, 0xd2, 0xa9, 0x35, 0x89, 0xc7, 0x1c,
|
||||
0xec, 0xa1, 0xc0, 0x8d, 0x2f, 0x1d, 0x1f, 0xd7, 0x28, 0x0a, 0x0e, 0x71, 0x13, 0xd1, 0x1a, 0x73,
|
||||
0xe8, 0x01, 0xad, 0x1d, 0xde, 0x90, 0x0b, 0xcb, 0x0f, 0x08, 0x23, 0xfa, 0xb5, 0x81, 0xb4, 0x15,
|
||||
0x4a, 0x5a, 0x52, 0xe0, 0xf0, 0x86, 0xf1, 0x61, 0x8b, 0x90, 0x56, 0x1b, 0xd5, 0x84, 0xec, 0x6e,
|
||||
0x77, 0xaf, 0x86, 0x3a, 0x3e, 0xeb, 0x49, 0x55, 0xe3, 0x83, 0xe1, 0x8f, 0x8e, 0x17, 0x7e, 0x5a,
|
||||
0x68, 0x91, 0x16, 0x11, 0xcb, 0x1a, 0x5f, 0xa9, 0xd3, 0x5b, 0x53, 0xf9, 0xcb, 0x7a, 0x3e, 0xa2,
|
||||
0xb5, 0x0e, 0xe9, 0x7a, 0x4c, 0xe9, 0xdd, 0x3e, 0x8b, 0x1e, 0x62, 0x01, 0x6e, 0xaa, 0xd7, 0x19,
|
||||
0x77, 0xcf, 0xa0, 0xe9, 0x22, 0xda, 0x0c, 0xb0, 0xcf, 0x48, 0xa0, 0x94, 0xef, 0x9c, 0x41, 0x99,
|
||||
0x23, 0x26, 0x7e, 0x28, 0xdd, 0xca, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97, 0x02,
|
||||
0xe6, 0x51, 0x0a, 0xe6, 0xd7, 0x03, 0xe4, 0x30, 0xf4, 0xc4, 0xa1, 0x07, 0x36, 0x7a, 0xde, 0x45,
|
||||
0x94, 0xe9, 0x75, 0x98, 0x89, 0xcc, 0xef, 0x60, 0xb7, 0xac, 0x2d, 0x6b, 0xd5, 0xe2, 0xda, 0x95,
|
||||
0xfe, 0x71, 0xa5, 0xb4, 0x1e, 0x9e, 0x37, 0x36, 0xec, 0x52, 0x24, 0xd4, 0x70, 0xf5, 0x1a, 0xe4,
|
||||
0x02, 0x42, 0xd8, 0x1e, 0x2d, 0xa7, 0x97, 0xd3, 0xd5, 0x52, 0xfd, 0x7d, 0x2b, 0x16, 0x52, 0xe1,
|
||||
0x9d, 0xf5, 0x80, 0x83, 0x69, 0x2b, 0x31, 0x7d, 0x01, 0xb2, 0x94, 0xb9, 0xd8, 0x2b, 0x67, 0xb8,
|
||||
0x75, 0x5b, 0x6e, 0xf4, 0x45, 0xc8, 0x51, 0xe6, 0x92, 0x2e, 0x2b, 0x67, 0xc5, 0xb1, 0xda, 0xa9,
|
||||
0x73, 0x14, 0x04, 0xe5, 0x5c, 0x74, 0x8e, 0x82, 0x40, 0x37, 0xa0, 0xc0, 0x50, 0xd0, 0xc1, 0x9e,
|
||||
0xd3, 0x2e, 0xe7, 0x97, 0xb5, 0x6a, 0xc1, 0x8e, 0xf6, 0xfa, 0x3d, 0x80, 0xe6, 0x3e, 0x6a, 0x1e,
|
||||
0xf8, 0x04, 0x7b, 0xac, 0x5c, 0x58, 0xd6, 0xaa, 0xa5, 0xfa, 0xb5, 0x51, 0xb7, 0x36, 0x22, 0xc4,
|
||||
0xed, 0x98, 0xbc, 0x6e, 0x41, 0x9e, 0xf8, 0x0c, 0x13, 0x8f, 0x96, 0x8b, 0x42, 0x75, 0xc1, 0x92,
|
||||
0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xaa, 0xd7, 0xb3, 0x43, 0x21, 0xf3, 0x19, 0xe8, 0x71, 0x24, 0xa9,
|
||||
0x4f, 0x3c, 0x8a, 0xde, 0x0a, 0xca, 0x39, 0x48, 0xfb, 0xd8, 0x2d, 0xa7, 0x96, 0xb5, 0xea, 0xac,
|
||||
0xcd, 0x97, 0x66, 0x0b, 0x66, 0x1e, 0x33, 0x27, 0x60, 0xe7, 0x09, 0xd0, 0xc7, 0x90, 0x47, 0x2f,
|
||||
0x51, 0x73, 0x47, 0x59, 0x2e, 0xae, 0x41, 0xff, 0xb8, 0x92, 0xdb, 0x7c, 0x89, 0x9a, 0x8d, 0x0d,
|
||||
0x3b, 0xc7, 0x3f, 0x35, 0x5c, 0xf3, 0x23, 0x98, 0x55, 0x17, 0x29, 0xff, 0x95, 0x2f, 0xda, 0xc0,
|
||||
0x97, 0x2d, 0x98, 0xdf, 0x40, 0x6d, 0x74, 0xee, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xb2, 0xb4, 0x14,
|
||||
0xdd, 0xb6, 0x08, 0xa9, 0x48, 0x39, 0xd7, 0x3f, 0xae, 0xa4, 0x1a, 0x1b, 0x76, 0x0a, 0x9f, 0x82,
|
||||
0x88, 0x5e, 0x81, 0x12, 0x7a, 0x89, 0xd9, 0x0e, 0x65, 0x0e, 0xeb, 0xf2, 0x9c, 0xe3, 0x5f, 0x80,
|
||||
0x1f, 0x3d, 0x16, 0x27, 0xfa, 0x2a, 0x14, 0xf9, 0x0e, 0xb9, 0x3b, 0x0e, 0x13, 0x29, 0x56, 0xaa,
|
||||
0x1b, 0x23, 0x01, 0x7c, 0x12, 0x96, 0xc3, 0x5a, 0xe1, 0xe8, 0xb8, 0x72, 0xe9, 0xd5, 0xdf, 0x15,
|
||||
0xcd, 0x2e, 0x48, 0xb5, 0x55, 0x66, 0x12, 0x58, 0x90, 0xfe, 0x6d, 0x07, 0xa4, 0x89, 0x28, 0xbd,
|
||||
0x70, 0xf4, 0x11, 0xc0, 0x16, 0xba, 0xf8, 0x20, 0x6f, 0x42, 0x49, 0x5c, 0xa3, 0x40, 0xbf, 0x05,
|
||||
0x79, 0x5f, 0x3e, 0x50, 0x5c, 0x31, 0x54, 0x23, 0x87, 0x37, 0x54, 0x99, 0x84, 0x20, 0x84, 0xc2,
|
||||
0xe6, 0x0a, 0xcc, 0x7d, 0x83, 0x29, 0xe3, 0x69, 0x10, 0x41, 0xb3, 0x08, 0xb9, 0x3d, 0xdc, 0x66,
|
||||
0x28, 0x90, 0xde, 0xda, 0x6a, 0xc7, 0x93, 0x26, 0x26, 0x1b, 0xd5, 0x46, 0x56, 0xb4, 0xf8, 0xb2,
|
||||
0x26, 0x3a, 0xc6, 0xe4, 0x6b, 0xa5, 0xa8, 0xf9, 0x4a, 0x83, 0xd2, 0xd7, 0xb8, 0xdd, 0xbe, 0x68,
|
||||
0x90, 0x44, 0xc3, 0xc1, 0x2d, 0xde, 0x56, 0x64, 0x6e, 0xa9, 0x1d, 0x4f, 0x45, 0xa7, 0xdd, 0x16,
|
||||
0x19, 0x55, 0xb0, 0xf9, 0xd2, 0xfc, 0x57, 0x03, 0x9d, 0x2b, 0xbf, 0x83, 0x2c, 0x89, 0x7a, 0x62,
|
||||
0xea, 0xf4, 0x9e, 0x98, 0x1e, 0xd3, 0x13, 0x33, 0x63, 0x7b, 0x62, 0x76, 0xa8, 0x27, 0x56, 0x21,
|
||||
0x43, 0x7d, 0xd4, 0x14, 0x5d, 0x74, 0x5c, 0x4b, 0x13, 0x12, 0x71, 0x94, 0xf2, 0x63, 0x53, 0xe9,
|
||||
0x2a, 0xbc, 0x77, 0xe2, 0xe9, 0x32, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0xd9, 0x88, 0xe2, 0x9f, 0xd0,
|
||||
0x36, 0xeb, 0x5d, 0x78, 0xa8, 0x16, 0x20, 0xfb, 0x02, 0xbb, 0x6c, 0x5f, 0x45, 0x4a, 0x6e, 0x38,
|
||||
0x3a, 0xfb, 0x08, 0xb7, 0xf6, 0x65, 0xf5, 0xcf, 0xda, 0x6a, 0x67, 0xfe, 0x0c, 0x97, 0xd7, 0xdb,
|
||||
0x84, 0xa2, 0xc6, 0xc3, 0xff, 0xc3, 0x31, 0x19, 0xce, 0xb4, 0x88, 0x82, 0xdc, 0x98, 0x5f, 0xc1,
|
||||
0xdc, 0xb6, 0xd3, 0xa5, 0xe7, 0xee, 0x9f, 0x5b, 0x30, 0x6f, 0x23, 0xda, 0xed, 0x9c, 0xdb, 0xd0,
|
||||
0x26, 0x5c, 0xe1, 0xc5, 0xb9, 0x8d, 0xdd, 0xf3, 0x24, 0xaf, 0x69, 0xcb, 0x7e, 0x20, 0xcd, 0xa8,
|
||||
0x12, 0xbf, 0x0f, 0x45, 0xd5, 0x2e, 0x50, 0x58, 0xe6, 0xcb, 0x93, 0xca, 0xbc, 0xe1, 0xed, 0x11,
|
||||
0x7b, 0xa0, 0x62, 0xbe, 0xd6, 0xe0, 0xea, 0x7a, 0x34, 0x93, 0xcf, 0xcb, 0x51, 0x76, 0x60, 0xde,
|
||||
0x77, 0x02, 0xe4, 0xb1, 0x9d, 0x18, 0x2f, 0x90, 0xe1, 0xab, 0xf3, 0xfe, 0xff, 0xd7, 0x71, 0x65,
|
||||
0x25, 0xc6, 0xb6, 0x88, 0x8f, 0xbc, 0x48, 0x9d, 0xd6, 0x5a, 0xe4, 0xba, 0x8b, 0x5b, 0x88, 0x32,
|
||||
0x6b, 0x43, 0xfc, 0xb2, 0xe7, 0xa4, 0xb1, 0xf5, 0x53, 0x39, 0x43, 0x7a, 0x1a, 0xce, 0xf0, 0x14,
|
||||
0x16, 0x87, 0x5f, 0x17, 0x01, 0x57, 0x1a, 0x30, 0xc1, 0x53, 0x3b, 0xe4, 0x08, 0x79, 0x89, 0x2b,
|
||||
0x98, 0xbf, 0xa7, 0x60, 0xfe, 0x5b, 0xdf, 0x7d, 0x07, 0xc4, 0xae, 0x0e, 0xc5, 0x00, 0x51, 0xd2,
|
||||
0x0d, 0x9a, 0x88, 0x0a, 0xb0, 0xc6, 0xbd, 0x6a, 0x20, 0xa6, 0xef, 0x42, 0xc9, 0xf1, 0x3c, 0xc2,
|
||||
0x9c, 0x10, 0x0b, 0xee, 0xfd, 0x97, 0xd6, 0x24, 0x92, 0x6f, 0x8d, 0x78, 0x6b, 0xad, 0x0e, 0x4c,
|
||||
0x6c, 0x7a, 0x2c, 0xe8, 0xd9, 0x71, 0xa3, 0xc6, 0x7d, 0x98, 0x1b, 0x16, 0xe0, 0xcd, 0xf9, 0x00,
|
||||
0xf5, 0xd4, 0xec, 0xe1, 0x4b, 0x5e, 0x82, 0x87, 0x4e, 0xbb, 0x8b, 0xc2, 0x8e, 0x2a, 0x36, 0x77,
|
||||
0x52, 0xb7, 0x35, 0x73, 0x05, 0x2e, 0x3f, 0x90, 0x2c, 0x3d, 0x44, 0xa7, 0x0c, 0x79, 0x39, 0xae,
|
||||
0x24, 0xde, 0x45, 0x3b, 0xdc, 0xf2, 0x0a, 0x89, 0x64, 0xa3, 0xe1, 0x95, 0x57, 0x24, 0x5f, 0x05,
|
||||
0xa7, 0x7c, 0x0a, 0xe1, 0x15, 0x02, 0x76, 0x28, 0x68, 0xee, 0x41, 0xe9, 0x7b, 0x07, 0x5f, 0xfc,
|
||||
0x80, 0x0f, 0x60, 0x46, 0xde, 0xa3, 0x7c, 0x1d, 0x22, 0x4b, 0xda, 0x64, 0xb2, 0x94, 0x7a, 0x1b,
|
||||
0xb2, 0x54, 0x7f, 0x3d, 0x03, 0x59, 0x31, 0xde, 0xf5, 0x03, 0xc8, 0x49, 0x22, 0xac, 0xd7, 0x26,
|
||||
0x47, 0x7c, 0xe4, 0x0f, 0x0f, 0xe3, 0xf3, 0xe9, 0x15, 0xd4, 0xd3, 0x7e, 0x84, 0xac, 0x20, 0xac,
|
||||
0xfa, 0xca, 0x64, 0xd5, 0x38, 0x7d, 0x36, 0x3e, 0x9d, 0x4a, 0x56, 0xdd, 0xd0, 0x82, 0x9c, 0x64,
|
||||
0x81, 0x49, 0xcf, 0x19, 0x61, 0xc5, 0xc6, 0x67, 0xd3, 0x28, 0x44, 0x17, 0x3d, 0x87, 0xd9, 0x13,
|
||||
0x74, 0x53, 0xaf, 0x4f, 0xa3, 0x7e, 0x92, 0x75, 0x9c, 0xf1, 0xca, 0x67, 0x90, 0xde, 0x42, 0x4c,
|
||||
0xaf, 0x4e, 0x56, 0x1a, 0x70, 0x52, 0xe3, 0x93, 0x29, 0x24, 0x23, 0xdc, 0x32, 0x7c, 0x1c, 0xe8,
|
||||
0xd6, 0x64, 0x95, 0x61, 0x0a, 0x69, 0xd4, 0xa6, 0x96, 0x57, 0x17, 0x35, 0x20, 0xc3, 0x19, 0xa1,
|
||||
0x9e, 0xe0, 0x5b, 0x8c, 0x35, 0x1a, 0x8b, 0x23, 0xc9, 0xbd, 0xd9, 0xf1, 0x59, 0x4f, 0xdf, 0x86,
|
||||
0x0c, 0x2f, 0x25, 0x3d, 0x21, 0x0f, 0x47, 0xd9, 0xde, 0x58, 0x8b, 0x8f, 0xa1, 0x18, 0x11, 0xa1,
|
||||
0x24, 0x28, 0x86, 0x19, 0xd3, 0x58, 0xa3, 0x0f, 0x21, 0xaf, 0x28, 0x8c, 0x9e, 0x10, 0xef, 0x93,
|
||||
0x4c, 0x67, 0x82, 0xc1, 0xac, 0xa0, 0x24, 0x49, 0x1e, 0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x41,
|
||||
0x4e, 0x72, 0x93, 0xa4, 0xa2, 0x19, 0x61, 0x30, 0x63, 0x4d, 0x62, 0x28, 0x84, 0xf4, 0x42, 0xbf,
|
||||
0x9e, 0x9c, 0x23, 0x31, 0x36, 0x63, 0x58, 0xd3, 0x8a, 0xab, 0x8c, 0x7a, 0x01, 0x10, 0x1b, 0xea,
|
||||
0x37, 0x13, 0x20, 0x3e, 0x8d, 0x9e, 0x18, 0x5f, 0x9c, 0x4d, 0x49, 0x5d, 0xfc, 0x08, 0x72, 0x72,
|
||||
0x0c, 0x26, 0xc1, 0x36, 0x32, 0x2c, 0xc7, 0xc2, 0xb6, 0x07, 0x79, 0x35, 0xba, 0x92, 0x72, 0xe5,
|
||||
0xe4, 0x34, 0x34, 0xae, 0x4f, 0x29, 0xad, 0x5c, 0xff, 0x01, 0x32, 0x7c, 0xe6, 0x24, 0x55, 0x61,
|
||||
0x6c, 0xfe, 0x19, 0x2b, 0xd3, 0x88, 0x4a, 0xf3, 0x6b, 0xdf, 0x1d, 0xbd, 0x59, 0xba, 0xf4, 0xe7,
|
||||
0x9b, 0xa5, 0x4b, 0xbf, 0xf4, 0x97, 0xb4, 0xa3, 0xfe, 0x92, 0xf6, 0x47, 0x7f, 0x49, 0xfb, 0xa7,
|
||||
0xbf, 0xa4, 0x3d, 0xbb, 0xf7, 0x76, 0xff, 0x7e, 0xbc, 0x2b, 0x16, 0x4f, 0x53, 0xbb, 0x39, 0x01,
|
||||
0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x55, 0xbf, 0x54, 0xc7, 0x14, 0x00, 0x00,
|
||||
0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xd3, 0xe5, 0xc5, 0x04,
|
||||
0xba, 0xa6, 0x2e, 0xaa, 0xaa, 0xb6, 0xaa, 0xc8, 0x8d, 0xc8, 0x82, 0xaa, 0xe9, 0xb6, 0x40, 0x55,
|
||||
0x09, 0x99, 0x8d, 0x77, 0x62, 0x8f, 0x62, 0xef, 0x6c, 0x77, 0xc6, 0x69, 0xcd, 0x0b, 0xfc, 0x84,
|
||||
0xbe, 0xf2, 0x02, 0x7f, 0xa7, 0x8f, 0x3c, 0x22, 0x54, 0x05, 0xea, 0x57, 0x7e, 0x01, 0x6f, 0x68,
|
||||
0x2e, 0xbb, 0xde, 0xd8, 0xf1, 0x25, 0x4d, 0xc3, 0x4b, 0x3b, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf,
|
||||
0xb9, 0x7d, 0x0e, 0x6c, 0x34, 0x31, 0x6b, 0x75, 0xf7, 0xac, 0x06, 0xe9, 0x54, 0x1a, 0xc4, 0x63,
|
||||
0x0e, 0xf6, 0x50, 0xe0, 0xc6, 0x97, 0x8e, 0x8f, 0x2b, 0x14, 0x05, 0x87, 0xb8, 0x81, 0x68, 0x85,
|
||||
0x39, 0xf4, 0x80, 0x56, 0x0e, 0xaf, 0xcb, 0x85, 0xe5, 0x07, 0x84, 0x11, 0xfd, 0xca, 0x40, 0xda,
|
||||
0x0a, 0x25, 0x2d, 0x29, 0x70, 0x78, 0xdd, 0xf8, 0xb0, 0x49, 0x48, 0xb3, 0x8d, 0x2a, 0x42, 0x76,
|
||||
0xaf, 0xbb, 0x5f, 0x41, 0x1d, 0x9f, 0xf5, 0xa4, 0xaa, 0xf1, 0xc1, 0xf0, 0x47, 0xc7, 0x0b, 0x3f,
|
||||
0x2d, 0x35, 0x49, 0x93, 0x88, 0x65, 0x85, 0xaf, 0xd4, 0xe9, 0xcd, 0x99, 0xfc, 0x65, 0x3d, 0x1f,
|
||||
0xd1, 0x4a, 0x87, 0x74, 0x3d, 0xa6, 0xf4, 0x6e, 0x9d, 0x46, 0x0f, 0xb1, 0x00, 0x37, 0xd4, 0xeb,
|
||||
0x8c, 0x3b, 0xa7, 0xd0, 0x74, 0x11, 0x6d, 0x04, 0xd8, 0x67, 0x24, 0x50, 0xca, 0xb7, 0x4f, 0xa1,
|
||||
0xcc, 0x11, 0x13, 0xff, 0x28, 0xdd, 0xd2, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97,
|
||||
0x02, 0xe6, 0x3f, 0x09, 0x58, 0xdc, 0x0c, 0x90, 0xc3, 0xd0, 0x63, 0x87, 0x1e, 0xd8, 0xe8, 0x59,
|
||||
0x17, 0x51, 0xa6, 0x57, 0x61, 0x2e, 0x32, 0x5f, 0xc7, 0x6e, 0x51, 0x5b, 0xd5, 0xca, 0xf9, 0x8d,
|
||||
0x4b, 0xfd, 0xa3, 0x52, 0x61, 0x33, 0x3c, 0xaf, 0x6d, 0xd9, 0x85, 0x48, 0xa8, 0xe6, 0xea, 0x15,
|
||||
0xc8, 0x04, 0x84, 0xb0, 0x7d, 0x5a, 0x4c, 0xae, 0x26, 0xcb, 0x85, 0xea, 0xfb, 0x56, 0x2c, 0xa4,
|
||||
0xc2, 0x3b, 0xeb, 0x3e, 0x07, 0xd3, 0x56, 0x62, 0xfa, 0x12, 0xa4, 0x29, 0x73, 0xb1, 0x57, 0x4c,
|
||||
0x71, 0xeb, 0xb6, 0xdc, 0xe8, 0xcb, 0x90, 0xa1, 0xcc, 0x25, 0x5d, 0x56, 0x4c, 0x8b, 0x63, 0xb5,
|
||||
0x53, 0xe7, 0x28, 0x08, 0x8a, 0x99, 0xe8, 0x1c, 0x05, 0x81, 0x6e, 0x40, 0x8e, 0xa1, 0xa0, 0x83,
|
||||
0x3d, 0xa7, 0x5d, 0xcc, 0xae, 0x6a, 0xe5, 0x9c, 0x1d, 0xed, 0xf5, 0xbb, 0x00, 0x8d, 0x16, 0x6a,
|
||||
0x1c, 0xf8, 0x04, 0x7b, 0xac, 0x98, 0x5b, 0xd5, 0xca, 0x85, 0xea, 0x95, 0x51, 0xb7, 0xb6, 0x22,
|
||||
0xc4, 0xed, 0x98, 0xbc, 0x6e, 0x41, 0x96, 0xf8, 0x0c, 0x13, 0x8f, 0x16, 0xf3, 0x42, 0x75, 0xc9,
|
||||
0x92, 0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xba, 0xd7, 0xb3, 0x43, 0x21, 0xfd, 0x2a, 0xcc, 0x05, 0x5d,
|
||||
0x8f, 0x03, 0x5c, 0xf7, 0x1d, 0xd6, 0x2a, 0x82, 0xf0, 0xb3, 0xa0, 0xce, 0x76, 0x1d, 0xd6, 0x32,
|
||||
0x9f, 0x82, 0x1e, 0x07, 0x9b, 0xfa, 0xc4, 0xa3, 0xe8, 0xad, 0xd0, 0x5e, 0x80, 0xa4, 0x8f, 0xdd,
|
||||
0x62, 0x62, 0x55, 0x2b, 0xcf, 0xdb, 0x7c, 0x69, 0x36, 0x61, 0xee, 0x11, 0x73, 0x02, 0x76, 0x96,
|
||||
0x18, 0x7e, 0x04, 0x59, 0xf4, 0x02, 0x35, 0xea, 0xca, 0x72, 0x7e, 0x03, 0xfa, 0x47, 0xa5, 0xcc,
|
||||
0xf6, 0x0b, 0xd4, 0xa8, 0x6d, 0xd9, 0x19, 0xfe, 0xa9, 0xe6, 0x9a, 0x57, 0x61, 0x5e, 0x5d, 0xa4,
|
||||
0xfc, 0x57, 0xbe, 0x68, 0x03, 0x5f, 0x76, 0x60, 0x71, 0x0b, 0xb5, 0xd1, 0x99, 0x93, 0xca, 0xfc,
|
||||
0x55, 0x83, 0x8b, 0xd2, 0x52, 0x74, 0xdb, 0x32, 0x24, 0x22, 0xe5, 0x4c, 0xff, 0xa8, 0x94, 0xa8,
|
||||
0x6d, 0xd9, 0x09, 0x7c, 0x02, 0x22, 0x7a, 0x09, 0x0a, 0xe8, 0x05, 0x66, 0x75, 0xca, 0x1c, 0xd6,
|
||||
0xe5, 0x69, 0xc9, 0xbf, 0x00, 0x3f, 0x7a, 0x24, 0x4e, 0xf4, 0x75, 0xc8, 0xf3, 0x1d, 0x72, 0xeb,
|
||||
0x0e, 0x13, 0x59, 0x58, 0xa8, 0x1a, 0x23, 0x31, 0x7e, 0x1c, 0x56, 0xcc, 0x46, 0xee, 0xd5, 0x51,
|
||||
0xe9, 0xc2, 0xcb, 0xbf, 0x4a, 0x9a, 0x9d, 0x93, 0x6a, 0xeb, 0xcc, 0x24, 0xb0, 0x24, 0xfd, 0xdb,
|
||||
0x0d, 0x48, 0x03, 0x51, 0x7a, 0xee, 0xe8, 0x23, 0x80, 0x1d, 0x74, 0xfe, 0x41, 0xde, 0x86, 0x82,
|
||||
0xb8, 0x46, 0x81, 0x7e, 0x13, 0xb2, 0xbe, 0x7c, 0xa0, 0xb8, 0x62, 0xa8, 0x8c, 0x0e, 0xaf, 0xab,
|
||||
0x4a, 0x0a, 0x41, 0x08, 0x85, 0xcd, 0x35, 0x58, 0xf8, 0x1a, 0x53, 0xc6, 0xd3, 0x20, 0x82, 0x66,
|
||||
0x19, 0x32, 0xfb, 0xb8, 0xcd, 0x50, 0x20, 0xbd, 0xb5, 0xd5, 0x8e, 0x27, 0x4d, 0x4c, 0x36, 0xaa,
|
||||
0x8d, 0xb4, 0x98, 0x02, 0x45, 0x4d, 0x34, 0x95, 0xc9, 0xd7, 0x4a, 0x51, 0xf3, 0xa5, 0x06, 0x85,
|
||||
0xaf, 0x70, 0xbb, 0x7d, 0xde, 0x20, 0x89, 0x9e, 0x84, 0x9b, 0xbc, 0xf3, 0xc8, 0xdc, 0x52, 0x3b,
|
||||
0x9e, 0x8a, 0x4e, 0xbb, 0x2d, 0x32, 0x2a, 0x67, 0xf3, 0xa5, 0xf9, 0xaf, 0x06, 0x3a, 0x57, 0x7e,
|
||||
0x07, 0x59, 0x12, 0xb5, 0xcd, 0xc4, 0xc9, 0x6d, 0x33, 0x39, 0xa6, 0x6d, 0xa6, 0xc6, 0xb6, 0xcd,
|
||||
0xf4, 0x50, 0xdb, 0x2c, 0x43, 0x8a, 0xfa, 0xa8, 0x21, 0x1a, 0xed, 0xb8, 0xae, 0x27, 0x24, 0xe2,
|
||||
0x28, 0x65, 0xc7, 0xa6, 0xd2, 0x65, 0x78, 0xef, 0xd8, 0xd3, 0x65, 0x64, 0xcd, 0x5f, 0x34, 0x58,
|
||||
0xb0, 0x11, 0xc5, 0x3f, 0xa2, 0x5d, 0xd6, 0x3b, 0xf7, 0x50, 0x2d, 0x41, 0xfa, 0x39, 0x76, 0x59,
|
||||
0x4b, 0x45, 0x4a, 0x6e, 0x38, 0x3a, 0x2d, 0x84, 0x9b, 0x2d, 0x59, 0xfd, 0xf3, 0xb6, 0xda, 0x99,
|
||||
0x3f, 0xc1, 0xc5, 0xcd, 0x36, 0xa1, 0xa8, 0xf6, 0xe0, 0xff, 0x70, 0x4c, 0x86, 0x33, 0x29, 0xa2,
|
||||
0x20, 0x37, 0xe6, 0x97, 0xb0, 0xb0, 0xeb, 0x74, 0xe9, 0x99, 0xfb, 0xe7, 0x0e, 0x2c, 0xda, 0x88,
|
||||
0x76, 0x3b, 0x67, 0x36, 0xb4, 0x0d, 0x97, 0x78, 0x71, 0xee, 0x62, 0xf7, 0x2c, 0xc9, 0x6b, 0xda,
|
||||
0xb2, 0x1f, 0x48, 0x33, 0xaa, 0xc4, 0xef, 0x41, 0x5e, 0xb5, 0x0b, 0x14, 0x96, 0xf9, 0xea, 0xa4,
|
||||
0x32, 0xaf, 0x79, 0xfb, 0xc4, 0x1e, 0xa8, 0x98, 0xaf, 0x35, 0xb8, 0xbc, 0x19, 0x8d, 0xed, 0xb3,
|
||||
0xd2, 0x98, 0x3a, 0x2c, 0xfa, 0x4e, 0x80, 0x3c, 0x56, 0x8f, 0x51, 0x07, 0x19, 0xbe, 0x2a, 0xef,
|
||||
0xff, 0x7f, 0x1e, 0x95, 0xd6, 0x62, 0x84, 0x8c, 0xf8, 0xc8, 0x8b, 0xd4, 0x69, 0xa5, 0x49, 0xae,
|
||||
0xb9, 0xb8, 0x89, 0x28, 0xb3, 0xb6, 0xc4, 0x7f, 0xf6, 0x82, 0x34, 0xb6, 0x79, 0x22, 0xad, 0x48,
|
||||
0xce, 0x40, 0x2b, 0xcc, 0x27, 0xb0, 0x3c, 0xfc, 0xba, 0x08, 0xb8, 0xc2, 0x80, 0x2c, 0x9e, 0xd8,
|
||||
0x21, 0x47, 0xf8, 0x4d, 0x5c, 0xc1, 0xfc, 0x2d, 0x01, 0x8b, 0xdf, 0xf8, 0xee, 0x3b, 0xe0, 0x7e,
|
||||
0x55, 0xc8, 0x07, 0x88, 0x92, 0x6e, 0xd0, 0x40, 0x54, 0x80, 0x35, 0xee, 0x55, 0x03, 0x31, 0x7d,
|
||||
0x0f, 0x0a, 0x8e, 0xe7, 0x11, 0xe6, 0x84, 0x58, 0x70, 0xef, 0xbf, 0xb0, 0x26, 0xfd, 0x0e, 0xb0,
|
||||
0x46, 0xbc, 0xb5, 0xd6, 0x07, 0x26, 0xb6, 0x3d, 0x16, 0xf4, 0xec, 0xb8, 0x51, 0xe3, 0x1e, 0x2c,
|
||||
0x0c, 0x0b, 0xf0, 0xe6, 0x7c, 0x80, 0x7a, 0x6a, 0xf6, 0xf0, 0x25, 0x2f, 0xc1, 0x43, 0xa7, 0xdd,
|
||||
0x45, 0x61, 0x47, 0x15, 0x9b, 0xdb, 0x89, 0x5b, 0x9a, 0xb9, 0x06, 0x17, 0xef, 0x4b, 0x22, 0x1f,
|
||||
0xa2, 0x53, 0x84, 0xac, 0x1c, 0x57, 0x12, 0xef, 0xbc, 0x1d, 0x6e, 0x79, 0x85, 0x44, 0xb2, 0xd1,
|
||||
0xf0, 0xca, 0xaa, 0xdf, 0x01, 0x2a, 0x38, 0xc5, 0x13, 0x38, 0xb1, 0x10, 0xb0, 0x43, 0x41, 0x73,
|
||||
0x1f, 0x0a, 0xdf, 0x39, 0xf8, 0xfc, 0x07, 0x7c, 0x00, 0x73, 0xf2, 0x1e, 0xe5, 0xeb, 0x10, 0x59,
|
||||
0xd2, 0x26, 0x93, 0xa5, 0xc4, 0xdb, 0x90, 0xa5, 0xea, 0xeb, 0x39, 0x48, 0x8b, 0xf1, 0xae, 0x1f,
|
||||
0x40, 0x46, 0x12, 0x61, 0xbd, 0x32, 0x39, 0xe2, 0x23, 0xbf, 0x4d, 0x8c, 0xcf, 0x66, 0x57, 0x50,
|
||||
0x4f, 0xfb, 0x01, 0xd2, 0x82, 0xb0, 0xea, 0x6b, 0x93, 0x55, 0xe3, 0xf4, 0xd9, 0xf8, 0x64, 0x26,
|
||||
0x59, 0x75, 0x43, 0x13, 0x32, 0x92, 0x05, 0x4e, 0x7b, 0xce, 0x08, 0x2b, 0x36, 0x3e, 0x9d, 0x45,
|
||||
0x21, 0xba, 0xe8, 0x19, 0xcc, 0x1f, 0xa3, 0x9b, 0x7a, 0x75, 0x16, 0xf5, 0xe3, 0xac, 0xe3, 0x94,
|
||||
0x57, 0x3e, 0x85, 0xe4, 0x0e, 0x62, 0x7a, 0x79, 0xb2, 0xd2, 0x80, 0x93, 0x1a, 0x1f, 0xcf, 0x20,
|
||||
0x19, 0xe1, 0x96, 0xe2, 0xe3, 0x40, 0xb7, 0x26, 0xab, 0x0c, 0x53, 0x48, 0xa3, 0x32, 0xb3, 0xbc,
|
||||
0xba, 0xa8, 0x06, 0x29, 0xce, 0x08, 0xf5, 0x29, 0xbe, 0xc5, 0x58, 0xa3, 0xb1, 0x3c, 0x92, 0xdc,
|
||||
0xdb, 0x1d, 0x9f, 0xf5, 0xf4, 0x5d, 0x48, 0xf1, 0x52, 0xd2, 0xa7, 0xe4, 0xe1, 0x28, 0xdb, 0x1b,
|
||||
0x6b, 0xf1, 0x11, 0xe4, 0x23, 0x22, 0x34, 0x0d, 0x8a, 0x61, 0xc6, 0x34, 0xd6, 0xe8, 0x03, 0xc8,
|
||||
0x2a, 0x0a, 0xa3, 0x4f, 0x89, 0xf7, 0x71, 0xa6, 0x33, 0xc1, 0x60, 0x5a, 0x50, 0x92, 0x69, 0x1e,
|
||||
0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x42, 0x46, 0x72, 0x93, 0x69, 0x45, 0x33, 0xc2, 0x60, 0xc6,
|
||||
0x9a, 0xc4, 0x90, 0x0b, 0xe9, 0x85, 0x7e, 0x6d, 0x7a, 0x8e, 0xc4, 0xd8, 0x8c, 0x61, 0xcd, 0x2a,
|
||||
0xae, 0x32, 0xea, 0x39, 0x40, 0x6c, 0xa8, 0xdf, 0x98, 0x02, 0xf1, 0x49, 0xf4, 0xc4, 0xf8, 0xfc,
|
||||
0x74, 0x4a, 0xea, 0xe2, 0x87, 0x90, 0x91, 0x63, 0x70, 0x1a, 0x6c, 0x23, 0xc3, 0x72, 0x2c, 0x6c,
|
||||
0xfb, 0x90, 0x55, 0xa3, 0x6b, 0x5a, 0xae, 0x1c, 0x9f, 0x86, 0xc6, 0xb5, 0x19, 0xa5, 0x95, 0xeb,
|
||||
0xdf, 0x43, 0x8a, 0xcf, 0x9c, 0x69, 0x55, 0x18, 0x9b, 0x7f, 0xc6, 0xda, 0x2c, 0xa2, 0xd2, 0xfc,
|
||||
0xc6, 0xb7, 0xaf, 0xde, 0xac, 0x5c, 0xf8, 0xe3, 0xcd, 0xca, 0x85, 0x9f, 0xfb, 0x2b, 0xda, 0xab,
|
||||
0xfe, 0x8a, 0xf6, 0x7b, 0x7f, 0x45, 0xfb, 0xbb, 0xbf, 0xa2, 0x3d, 0xbd, 0xfb, 0x76, 0x7f, 0xa1,
|
||||
0xbc, 0x23, 0x16, 0x4f, 0x12, 0x7b, 0x19, 0x01, 0xd8, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
|
||||
0xc7, 0x3c, 0xaa, 0x56, 0xea, 0x14, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -1946,6 +1949,13 @@ func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.RuntimePath) > 0 {
|
||||
i -= len(m.RuntimePath)
|
||||
copy(dAtA[i:], m.RuntimePath)
|
||||
i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath)))
|
||||
i--
|
||||
dAtA[i] = 0x52
|
||||
}
|
||||
if m.Options != nil {
|
||||
{
|
||||
size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
|
||||
|
@ -3198,6 +3208,10 @@ func (m *CreateTaskRequest) Size() (n int) {
|
|||
l = m.Options.Size()
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
l = len(m.RuntimePath)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
|
@ -3747,6 +3761,7 @@ func (this *CreateTaskRequest) String() string {
|
|||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`,
|
||||
`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
|
||||
`RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
|
@ -4385,6 +4400,38 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RuntimePath = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTasks(dAtA[iNdEx:])
|
||||
|
|
2
vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
generated
vendored
|
@ -88,6 +88,8 @@ message CreateTaskRequest {
|
|||
containerd.types.Descriptor checkpoint = 8;
|
||||
|
||||
google.protobuf.Any options = 9;
|
||||
|
||||
string runtime_path = 10;
|
||||
}
|
||||
|
||||
message CreateTaskResponse {
|
||||
|
|
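The new runtime_path field in CreateTaskRequest is what drives the regenerated marshal/unmarshal code earlier in this file: field number 10 with the length-delimited wire type produces the tag byte 0x52 that MarshalToSizedBuffer writes and that the Unmarshal switch matches on in "case 10". A minimal sketch of that tag arithmetic (standard protobuf encoding, not part of the diff itself):

package main

import "fmt"

func main() {
	// Protobuf encodes every field key as (field_number << 3) | wire_type.
	// runtime_path is field 10 with wire type 2 (length-delimited string),
	// which is why the generated marshaller above emits the tag byte 0x52.
	const fieldNumber, wireType = 10, 2
	fmt.Printf("0x%x\n", fieldNumber<<3|wireType) // 0x52
}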
18
vendor/github.com/containerd/containerd/api/services/version/v1/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package version defines the version service.
|
||||
package version
|
18
vendor/github.com/containerd/containerd/api/types/task/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package task defines the task service.
|
||||
package task
|
54
vendor/github.com/containerd/containerd/archive/compression/compression.go
generated
vendored
|
@ -21,15 +21,16 @@ import (
|
|||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
type (
|
||||
|
@ -125,17 +126,52 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) {
|
|||
return r.buf.Peek(n)
|
||||
}
|
||||
|
||||
const (
|
||||
zstdMagicSkippableStart = 0x184D2A50
|
||||
zstdMagicSkippableMask = 0xFFFFFFF0
|
||||
)
|
||||
|
||||
var (
|
||||
gzipMagic = []byte{0x1F, 0x8B, 0x08}
|
||||
zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
|
||||
)
|
||||
|
||||
type matcher = func([]byte) bool
|
||||
|
||||
func magicNumberMatcher(m []byte) matcher {
|
||||
return func(source []byte) bool {
|
||||
return bytes.HasPrefix(source, m)
|
||||
}
|
||||
}
|
||||
|
||||
// zstdMatcher detects zstd compression algorithm.
|
||||
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
|
||||
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
|
||||
func zstdMatcher() matcher {
|
||||
return func(source []byte) bool {
|
||||
if bytes.HasPrefix(source, zstdMagic) {
|
||||
// Zstandard frame
|
||||
return true
|
||||
}
|
||||
// skippable frame
|
||||
if len(source) < 8 {
|
||||
return false
|
||||
}
|
||||
// magic number from 0x184D2A50 to 0x184D2A5F.
|
||||
if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// DetectCompression detects the compression algorithm of the source.
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Gzip: {0x1F, 0x8B, 0x08},
|
||||
Zstd: {0x28, 0xb5, 0x2f, 0xfd},
|
||||
for compression, fn := range map[Compression]matcher{
|
||||
Gzip: magicNumberMatcher(gzipMagic),
|
||||
Zstd: zstdMatcher(),
|
||||
} {
|
||||
if len(source) < len(m) {
|
||||
// Len too short
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(m, source[:len(m)]) {
|
||||
if fn(source) {
|
||||
return compression
|
||||
}
|
||||
}
|
||||
|
|
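The rewritten DetectCompression above delegates to per-format matcher functions so that zstd streams are recognized by either of Zstandard's two frame formats: a regular frame starting with the 0x28 0xb5 0x2f 0xfd magic, or a skippable frame whose first four little-endian bytes fall in the 0x184D2A50..0x184D2A5F range. A minimal self-contained sketch of the same check; the helper name isZstd is illustrative and not part of the vendored API:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

// isZstd mirrors the matcher above: a stream is treated as zstd if it starts
// with the Zstandard frame magic, or if it is at least 8 bytes long and its
// first four little-endian bytes, masked with 0xFFFFFFF0, equal 0x184D2A50
// (a skippable frame).
func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true
	}
	if len(source) < 8 {
		return false
	}
	return binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	skippable := []byte{0x5a, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00} // magic 0x184D2A5A (little-endian) + frame size
	fmt.Println(isZstd(zstdMagic), isZstd(skippable)) // true true
}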
85
vendor/github.com/containerd/containerd/archive/tar.go
generated
vendored
|
@ -19,6 +19,8 @@ package archive
|
|||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -30,7 +32,6 @@ import (
|
|||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = &sync.Pool{
|
||||
|
@ -48,12 +49,15 @@ var errInvalidArchive = errors.New("invalid archive")
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func Diff(ctx context.Context, a, b string) io.ReadCloser {
|
||||
r, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
err := WriteDiff(ctx, w, a, b)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("write diff failed")
|
||||
}
|
||||
if err = w.CloseWithError(err); err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
|
||||
}
|
||||
|
@ -68,12 +72,12 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser {
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error {
|
||||
var options WriteDiffOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return errors.Wrap(err, "failed to apply option")
|
||||
return fmt.Errorf("failed to apply option: %w", err)
|
||||
}
|
||||
}
|
||||
if options.writeDiffFunc == nil {
|
||||
|
@ -89,12 +93,12 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error {
|
||||
cw := newChangeWriter(w, b)
|
||||
cw := NewChangeWriter(w, b)
|
||||
err := fs.Changes(ctx, a, b, cw.HandleChange)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create diff tar stream")
|
||||
return fmt.Errorf("failed to create diff tar stream: %w", err)
|
||||
}
|
||||
return cw.Close()
|
||||
}
|
||||
|
@ -102,7 +106,7 @@ func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOp
|
|||
const (
|
||||
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
|
||||
// filename this means that file has been removed from the base layer.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
|
||||
whiteoutPrefix = ".wh."
|
||||
|
||||
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
|
||||
|
@ -118,14 +122,14 @@ const (
|
|||
)
|
||||
|
||||
// Apply applies a tar stream of an OCI style diff tar.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
|
||||
root = filepath.Clean(root)
|
||||
|
||||
var options ApplyOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return 0, errors.Wrap(err, "failed to apply option")
|
||||
return 0, fmt.Errorf("failed to apply option: %w", err)
|
||||
}
|
||||
}
|
||||
if options.Filter == nil {
|
||||
|
@ -140,7 +144,7 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int
|
|||
|
||||
// applyNaive applies a tar stream of an OCI style diff tar to a directory
|
||||
// applying each file as either a whole file or whiteout.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
var (
|
||||
dirs []*tar.Header
|
||||
|
@ -233,7 +237,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
|
|||
ppath, base := filepath.Split(hdr.Name)
|
||||
ppath, err = fs.RootPath(root, ppath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to get root path")
|
||||
return 0, fmt.Errorf("failed to get root path: %w", err)
|
||||
}
|
||||
|
||||
// Join to root before joining to parent path to ensure relative links are
|
||||
|
@ -263,7 +267,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
|
|||
}
|
||||
writeFile, err := convertWhiteout(hdr, path)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
|
||||
return 0, fmt.Errorf("failed to convert whiteout file %q: %w", hdr.Name, err)
|
||||
}
|
||||
if !writeFile {
|
||||
continue
|
||||
|
@ -370,7 +374,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
|
|||
return nil
|
||||
|
||||
default:
|
||||
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
|
||||
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
|
||||
}
|
||||
|
||||
// Lchown is not supported on Windows.
|
||||
|
@ -461,7 +465,17 @@ func mkparent(ctx context.Context, path, root string, parents []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type changeWriter struct {
|
||||
// ChangeWriter provides tar stream from filesystem change information.
|
||||
// The provided tar stream is styled as an OCI layer. Change information
|
||||
// (add/modify/delete/unmodified) for each file needs to be passed to this
|
||||
// writer through HandleChange method.
|
||||
//
|
||||
// This should be used in combination with continuity's diff computing functionality
|
||||
// (e.g. `fs.Change` of github.com/containerd/continuity/fs).
|
||||
//
|
||||
// See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details
|
||||
// about OCI layers
|
||||
type ChangeWriter struct {
|
||||
tw *tar.Writer
|
||||
source string
|
||||
whiteoutT time.Time
|
||||
|
@ -470,8 +484,11 @@ type changeWriter struct {
|
|||
addedDirs map[string]struct{}
|
||||
}
|
||||
|
||||
func newChangeWriter(w io.Writer, source string) *changeWriter {
|
||||
return &changeWriter{
|
||||
// NewChangeWriter returns ChangeWriter that writes tar stream of the source directory
|
||||
// to the provided writer. Change information (add/modify/delete/unmodified) for each
|
||||
// file needs to be passed through HandleChange method.
|
||||
func NewChangeWriter(w io.Writer, source string) *ChangeWriter {
|
||||
return &ChangeWriter{
|
||||
tw: tar.NewWriter(w),
|
||||
source: source,
|
||||
whiteoutT: time.Now(),
|
||||
|
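With changeWriter now exported as ChangeWriter/NewChangeWriter, callers outside the archive package can produce an OCI layer tar from a filesystem diff themselves, which is essentially what writeDiffNaive above does. A minimal sketch under the assumption that the two snapshot directories exist; "lower" and "upper" are placeholder paths:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd/archive"
	"github.com/containerd/continuity/fs"
)

func main() {
	out, err := os.Create("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	cw := archive.NewChangeWriter(out, "upper")
	// fs.Changes walks both trees and reports each add/modify/delete;
	// HandleChange turns every change into a tar entry, emitting ".wh."
	// whiteout markers for deletions.
	if err := fs.Changes(context.Background(), "lower", "upper", cw.HandleChange); err != nil {
		log.Fatal(err)
	}
	if err := cw.Close(); err != nil {
		log.Fatal(err)
	}
}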
@ -481,7 +498,10 @@ func newChangeWriter(w io.Writer, source string) *changeWriter {
|
|||
}
|
||||
}
|
||||
|
||||
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
|
||||
// HandleChange receives filesystem change information and reflect that information to
|
||||
// the result tar stream. This function implements `fs.ChangeFunc` of continuity
|
||||
// (github.com/containerd/continuity/fs) and should be used with that package.
|
||||
func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -501,7 +521,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write whiteout header")
|
||||
return fmt.Errorf("failed to write whiteout header: %w", err)
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
|
@ -536,12 +556,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
if strings.HasPrefix(name, string(filepath.Separator)) {
|
||||
name, err = filepath.Rel(string(filepath.Separator), name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to make path relative")
|
||||
return fmt.Errorf("failed to make path relative: %w", err)
|
||||
}
|
||||
}
|
||||
name, err = tarName(name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot canonicalize path")
|
||||
return fmt.Errorf("cannot canonicalize path: %w", err)
|
||||
}
|
||||
// suffix with '/' for directories
|
||||
if f.IsDir() && !strings.HasSuffix(name, "/") {
|
||||
|
@ -550,7 +570,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
hdr.Name = name
|
||||
|
||||
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
|
||||
return errors.Wrap(err, "failed to set device headers")
|
||||
return fmt.Errorf("failed to set device headers: %w", err)
|
||||
}
|
||||
|
||||
// additionalLinks stores file names which must be linked to
|
||||
|
@ -578,8 +598,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
}
|
||||
|
||||
if capability, err := getxattr(source, "security.capability"); err != nil {
|
||||
return errors.Wrap(err, "failed to get capabilities xattr")
|
||||
} else if capability != nil {
|
||||
return fmt.Errorf("failed to get capabilities xattr: %w", err)
|
||||
} else if len(capability) > 0 {
|
||||
if hdr.PAXRecords == nil {
|
||||
hdr.PAXRecords = map[string]string{}
|
||||
}
|
||||
|
@ -590,19 +610,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
return fmt.Errorf("failed to write file header: %w", err)
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||
file, err := open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open path: %v", source)
|
||||
return fmt.Errorf("failed to open path: %v: %w", source, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
n, err := copyBuffered(context.TODO(), cw.tw, file)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to copy")
|
||||
return fmt.Errorf("failed to copy: %w", err)
|
||||
}
|
||||
if n != hdr.Size {
|
||||
return errors.New("short write copying file")
|
||||
|
@ -621,7 +641,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
return fmt.Errorf("failed to write file header: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -629,14 +649,15 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) Close() error {
|
||||
// Close closes this writer.
|
||||
func (cw *ChangeWriter) Close() error {
|
||||
if err := cw.tw.Close(); err != nil {
|
||||
return errors.Wrap(err, "failed to close tar writer")
|
||||
return fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
|
||||
func (cw *ChangeWriter) includeParents(hdr *tar.Header) error {
|
||||
if cw.addedDirs == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -744,7 +765,7 @@ func validateWhiteout(path string) error {
|
|||
dir += string(filepath.Separator)
|
||||
}
|
||||
if !strings.HasPrefix(originalPath, dir) {
|
||||
return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
|
||||
return fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
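A pattern that repeats across every file in this vendor bump: errors.Wrap and errors.Wrapf from github.com/pkg/errors are replaced with fmt.Errorf and the %w verb, and the github.com/pkg/errors import is dropped. Unwrapping behaviour is preserved, as this small sketch shows; errInvalidArchive stands in for the package's own sentinel error:

package main

import (
	"errors"
	"fmt"
)

var errInvalidArchive = errors.New("invalid archive")

func main() {
	// Old style: errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
	// New style, as in the diff above: wrap with %w so the sentinel is still
	// reachable through the standard library's errors.Is / errors.As.
	base := ".wh..wh.foo"
	err := fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive)
	fmt.Println(errors.Is(err, errInvalidArchive)) // true
}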
2
vendor/github.com/containerd/containerd/archive/tar_freebsd.go
generated
vendored
|
@ -1,5 +1,3 @@
|
|||
// +build freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
|
1
vendor/github.com/containerd/containerd/archive/tar_mostunix.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows && !freebsd
|
||||
// +build !windows,!freebsd
|
||||
|
||||
/*
|
||||
|
|
2
vendor/github.com/containerd/containerd/archive/tar_opts_linux.go
generated
vendored
|
@ -1,5 +1,3 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
|
6
vendor/github.com/containerd/containerd/archive/tar_opts_windows.go
generated
vendored
|
@ -1,5 +1,3 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -26,7 +24,7 @@ import (
|
|||
)
|
||||
|
||||
// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
|
||||
func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents)
|
||||
}
|
||||
|
@ -47,7 +45,7 @@ func AsWindowsContainerLayer() ApplyOpt {
|
|||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
// See https://github.com/opencontainers/image-spec/blob/main/layer.md
|
||||
func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error {
|
||||
return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers)
|
||||
}
|
||||
|
|
34
vendor/github.com/containerd/containerd/archive/tar_unix.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
@ -20,14 +21,16 @@ package archive
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/pkg/userns"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -40,6 +43,20 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
|
||||
// Devmajor and Devminor are only needed for special devices.
|
||||
|
||||
// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
|
||||
// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
|
||||
// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
|
||||
|
||||
// ZFS in particular does not override the default:
|
||||
// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
|
||||
|
||||
// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
|
||||
// Such large values cannot be encoded in a tar header.
|
||||
if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
|
||||
return nil
|
||||
}
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
|
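The new guard in setHeaderForSpecialDevice skips Devmajor/Devminor for non-device entries on FreeBSD because NODEV (-1) stored in the unsigned Rdev field becomes 2^64-1, a value the signed int64 tar header fields cannot hold. A one-line illustration of that cast:

package main

import "fmt"

func main() {
	// On FreeBSD, Stat_t.Rdev is uint64 and NODEV is -1, so regular files
	// report an Rdev of 2^64-1; tar.Header.Devmajor/Devminor are int64 and
	// cannot encode it, hence the early return for non-device typeflags.
	var nodev int64 = -1
	fmt.Println(uint64(nodev)) // 18446744073709551615
}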
@ -69,6 +86,7 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
|
|||
}
|
||||
// Call chmod to avoid permission mask
|
||||
if err := os.Chmod(name, perm); err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return f, err
|
||||
|
@ -122,7 +140,7 @@ func getxattr(path, attr string) ([]byte, error) {
|
|||
func setxattr(path, key, value string) error {
|
||||
// Do not set trusted attributes
|
||||
if strings.HasPrefix(key, "trusted.") {
|
||||
return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
|
||||
return fmt.Errorf("admin attributes from archive not supported: %w", unix.ENOTSUP)
|
||||
}
|
||||
return unix.Lsetxattr(path, key, []byte(value), 0)
|
||||
}
|
||||
|
@ -142,12 +160,12 @@ func copyDirInfo(fi os.FileInfo, path string) error {
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", path)
|
||||
return fmt.Errorf("failed to chown %s: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
return fmt.Errorf("failed to chmod %s: %w", path, err)
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{
|
||||
|
@ -155,7 +173,7 @@ func copyDirInfo(fi os.FileInfo, path string) error {
|
|||
unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))),
|
||||
}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", path)
|
||||
return fmt.Errorf("failed to utime %s: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -167,7 +185,7 @@ func copyUpXAttrs(dst, src string) error {
|
|||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
return fmt.Errorf("failed to list xattrs on %s: %w", src, err)
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
// Do not copy up trusted attributes
|
||||
|
@ -179,10 +197,10 @@ func copyUpXAttrs(dst, src string) error {
|
|||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
return fmt.Errorf("failed to get xattr %q on %s: %w", xattr, src, err)
|
||||
}
|
||||
if err := lsetxattrCreate(dst, xattr, data); err != nil {
|
||||
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
return fmt.Errorf("failed to set xattr %q on %s: %w", xattr, dst, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
6
vendor/github.com/containerd/containerd/archive/tar_windows.go
generated
vendored
|
@ -1,5 +1,3 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -20,12 +18,12 @@ package archive
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// tarName returns platform-specific filepath
|
||||
|
@ -114,7 +112,7 @@ func setxattr(path, key, value string) error {
|
|||
|
||||
func copyDirInfo(fi os.FileInfo, path string) error {
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
return fmt.Errorf("failed to chmod %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
6
vendor/github.com/containerd/containerd/archive/time_unix.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
@ -19,11 +20,10 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func chtimes(path string, atime, mtime time.Time) error {
|
||||
|
@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
|
|||
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
|
||||
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
|
||||
return fmt.Errorf("failed call to UtimesNanoAt for %s: %w", path, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
12
vendor/github.com/containerd/containerd/cio/io_unix.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
@ -20,15 +21,14 @@ package cio
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/fifo"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root
|
||||
|
@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) {
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
dir, err := ioutil.TempDir(root, "")
|
||||
dir, err := os.MkdirTemp(root, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
|
|||
|
||||
if fifos.Stdin != "" {
|
||||
if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stdin fifo")
|
||||
return f, fmt.Errorf("failed to open stdin fifo: %w", retErr)
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil && f.Stdin != nil {
|
||||
|
@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
|
|||
}
|
||||
if fifos.Stdout != "" {
|
||||
if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stdout fifo")
|
||||
return f, fmt.Errorf("failed to open stdout fifo: %w", retErr)
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil && f.Stdout != nil {
|
||||
|
@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
|
|||
}
|
||||
if !fifos.Terminal && fifos.Stderr != "" {
|
||||
if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stderr fifo")
|
||||
return f, fmt.Errorf("failed to open stderr fifo: %w", retErr)
|
||||
}
|
||||
}
|
||||
return f, nil
|
||||
|
|
7
vendor/github.com/containerd/containerd/cio/io_windows.go
generated
vendored
|
@ -23,7 +23,6 @@ import (
|
|||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const pipeRoot = `\\.\pipe`
|
||||
|
@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
|
|||
if fifos.Stdin != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stdin, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin)
|
||||
return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
|
@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
|
|||
if fifos.Stdout != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stdout, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
|
||||
return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
|
@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
|
|||
if fifos.Stderr != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stderr, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
|
||||
return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
|
|
51
vendor/github.com/containerd/containerd/client.go
generated
vendored
|
@ -21,7 +21,6 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -62,10 +61,10 @@ import (
|
|||
ptypes "github.com/gogo/protobuf/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
)
|
||||
|
||||
|
@ -119,31 +118,33 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||
}
|
||||
gopts := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.FailOnNonTempDialError(true),
|
||||
grpc.WithConnectParams(connParams),
|
||||
grpc.WithContextDialer(dialer.ContextDialer),
|
||||
|
||||
// TODO(stevvooe): We may need to allow configuration of this on the client.
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
||||
grpc.WithReturnConnectionError(),
|
||||
}
|
||||
if len(copts.dialOptions) > 0 {
|
||||
gopts = copts.dialOptions
|
||||
}
|
||||
gopts = append(gopts, grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
|
||||
grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)))
|
||||
if len(copts.callOptions) > 0 {
|
||||
gopts = append(gopts, grpc.WithDefaultCallOptions(copts.callOptions...))
|
||||
}
|
||||
if copts.defaultns != "" {
|
||||
unary, stream := newNSInterceptors(copts.defaultns)
|
||||
gopts = append(gopts,
|
||||
grpc.WithUnaryInterceptor(unary),
|
||||
grpc.WithStreamInterceptor(stream),
|
||||
)
|
||||
gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary))
|
||||
gopts = append(gopts, grpc.WithChainStreamInterceptor(stream))
|
||||
}
|
||||
|
||||
connector := func() (*grpc.ClientConn, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), copts.timeout)
|
||||
defer cancel()
|
||||
conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
||||
return nil, fmt.Errorf("failed to dial %q: %w", address, err)
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
@ -154,7 +155,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
|
|||
c.conn, c.connector = conn, connector
|
||||
}
|
||||
if copts.services == nil && c.conn == nil {
|
||||
return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
|
||||
return nil, fmt.Errorf("no grpc connection or services is available: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
|
||||
// check namespace labels for default runtime
|
||||
|
@ -214,7 +215,7 @@ type Client struct {
|
|||
// Reconnect re-establishes the GRPC connection to the containerd daemon
|
||||
func (c *Client) Reconnect() error {
|
||||
if c.connector == nil {
|
||||
return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
|
||||
return fmt.Errorf("unable to reconnect to containerd, no connector available: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
|
@ -242,7 +243,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
|
|||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
return false, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
|
||||
|
@ -265,8 +266,8 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container
|
|||
return out, nil
|
||||
}
|
||||
|
||||
// NewContainer will create a new container in container with the provided id
|
||||
// the id must be unique within the namespace
|
||||
// NewContainer will create a new container with the provided id.
|
||||
// The id must be unique within the namespace.
|
||||
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
|
@ -369,9 +370,7 @@ type RemoteContext struct {
|
|||
|
||||
func defaultRemoteContext() *RemoteContext {
|
||||
return &RemoteContext{
|
||||
Resolver: docker.NewResolver(docker.ResolverOptions{
|
||||
Client: http.DefaultClient,
|
||||
}),
|
||||
Resolver: docker.NewResolver(docker.ResolverOptions{}),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -386,7 +385,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
|
|||
}
|
||||
|
||||
if fetchCtx.Unpack {
|
||||
return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
|
||||
return images.Image{}, fmt.Errorf("unpack on fetch not supported, try pull: %w", errdefs.ErrNotImplemented)
|
||||
}
|
||||
|
||||
if fetchCtx.PlatformMatcher == nil {
|
||||
|
@ -397,7 +396,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
|
|||
for _, s := range fetchCtx.Platforms {
|
||||
p, err := platforms.Parse(s)
|
||||
if err != nil {
|
||||
return images.Image{}, errors.Wrapf(err, "invalid platform %s", s)
|
||||
return images.Image{}, fmt.Errorf("invalid platform %s: %w", s, err)
|
||||
}
|
||||
ps = append(ps, p)
|
||||
}
|
||||
|
@ -433,7 +432,7 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
|
|||
for _, platform := range pushCtx.Platforms {
|
||||
p, err := platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "invalid platform %s", platform)
|
||||
return fmt.Errorf("invalid platform %s: %w", platform, err)
|
||||
}
|
||||
ps = append(ps, p)
|
||||
}
|
||||
|
@ -716,7 +715,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
|
|||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
return Version{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
|
||||
|
@ -739,7 +738,7 @@ func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
|
|||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
return ServerInfo{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
|
||||
|
@ -777,7 +776,7 @@ func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Sna
|
|||
|
||||
s := c.SnapshotService(name)
|
||||
if s == nil {
|
||||
return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
|
||||
return nil, fmt.Errorf("snapshotter %s was not found: %w", name, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
|
|
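client.go swaps the deprecated grpc.WithInsecure() for grpc.WithTransportCredentials(insecure.NewCredentials()), appends the message-size limits as default call options after any caller-supplied dial options, and chains the namespace interceptors instead of overwriting them. A minimal sketch of the same dial pattern; the socket address and size limits are placeholders, and the real client additionally wires in its own context dialer:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Plaintext transport credentials replace the deprecated grpc.WithInsecure().
	conn, err := grpc.DialContext(ctx, "unix:///run/containerd/containerd.sock",
		grpc.WithBlock(),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16<<20), // placeholder limits
			grpc.MaxCallSendMsgSize(16<<20),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}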
9
vendor/github.com/containerd/containerd/client_opts.go
generated
vendored
|
@ -34,6 +34,7 @@ type clientOpts struct {
|
|||
defaultPlatform platforms.MatchComparer
|
||||
services *services
|
||||
dialOptions []grpc.DialOption
|
||||
callOptions []grpc.CallOption
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
|
@ -75,6 +76,14 @@ func WithDialOpts(opts []grpc.DialOption) ClientOpt {
|
|||
}
|
||||
}
|
||||
|
||||
// WithCallOpts allows grpc.CallOptions to be set on the connection
|
||||
func WithCallOpts(opts []grpc.CallOption) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.callOptions = opts
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithServices sets services used by the client.
|
||||
func WithServices(opts ...ServicesOpt) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
|
|
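The new WithCallOpts client option complements WithDialOpts: it applies grpc.CallOptions to every RPC the containerd client issues. A usage sketch built on this vendored package; the socket path, namespace, and size limit are placeholders:

package main

import (
	"log"

	"github.com/containerd/containerd"
	"google.golang.org/grpc"
)

func main() {
	// WithCallOpts (added above) sets per-call gRPC options for the client,
	// e.g. raising the receive limit for large responses.
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"),
		containerd.WithCallOpts([]grpc.CallOption{
			grpc.MaxCallRecvMsgSize(32 << 20), // placeholder limit
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}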
12
vendor/github.com/containerd/containerd/container.go
generated
vendored
|
@ -19,6 +19,7 @@ package containerd
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
@ -38,7 +39,6 @@ import (
|
|||
ver "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -173,7 +173,7 @@ func (c *container) Spec(ctx context.Context) (*oci.Spec, error) {
|
|||
// an error is returned if the container has running tasks
|
||||
func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
|
||||
if _, err := c.loadTask(ctx, nil); err == nil {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
|
||||
return fmt.Errorf("cannot delete running task %v: %w", c.id, errdefs.ErrFailedPrecondition)
|
||||
}
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
|
@ -198,11 +198,11 @@ func (c *container) Image(ctx context.Context) (Image, error) {
|
|||
return nil, err
|
||||
}
|
||||
if r.Image == "" {
|
||||
return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
|
||||
return nil, fmt.Errorf("container not created from an image: %w", errdefs.ErrNotFound)
|
||||
}
|
||||
i, err := c.client.ImageService().Get(ctx, r.Image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
|
||||
return nil, fmt.Errorf("failed to get image %s for container: %w", r.Image, err)
|
||||
}
|
||||
return NewImage(c.client, i), nil
|
||||
}
|
||||
|
@ -232,7 +232,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
|
|||
}
|
||||
if r.SnapshotKey != "" {
|
||||
if r.Snapshotter == "" {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
|
||||
return nil, fmt.Errorf("unable to resolve rootfs mounts without snapshotter on container: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
// get the rootfs from the snapshotter and add it to the request
|
||||
|
@ -391,7 +391,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er
|
|||
if err != nil {
|
||||
err = errdefs.FromGRPC(err)
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil, errors.Wrapf(err, "no running task found")
|
||||
return nil, fmt.Errorf("no running task found: %w", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
|
2
vendor/github.com/containerd/containerd/container_checkpoint_opts.go
generated
vendored
|
@ -19,6 +19,7 @@ package containerd
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
|
@ -31,7 +32,6 @@ import (
|
|||
"github.com/containerd/containerd/runtime/v2/runc/options"
|
||||
"github.com/containerd/typeurl"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
10
vendor/github.com/containerd/containerd/container_opts.go
generated
vendored
|
@ -19,6 +19,7 @@ package containerd
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
|
@ -31,7 +32,6 @@ import (
|
|||
"github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DeleteOpts allows the caller to set options for the deletion of a container
|
||||
|
@ -227,7 +227,7 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
|
|||
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
|
||||
if c.SnapshotKey != "" {
|
||||
if c.Snapshotter == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
|
||||
return fmt.Errorf("container.Snapshotter must be set to cleanup rootfs snapshot: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
s, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
|
@ -276,15 +276,15 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
|
|||
func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
if name == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
|
||||
return fmt.Errorf("extension key must not be zero-length: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
any, err := typeurl.MarshalAny(extension)
|
||||
if err != nil {
|
||||
if errors.Is(err, typeurl.ErrNotFound) {
|
||||
return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
|
||||
return fmt.Errorf("extension %q is not registered with the typeurl package, see `typeurl.Register`: %w", name, err)
|
||||
}
|
||||
return errors.Wrap(err, "error marshalling extension")
|
||||
return fmt.Errorf("error marshalling extension: %w", err)
|
||||
}
|
||||
|
||||
if c.Extensions == nil {
|
||||
|
|
1
vendor/github.com/containerd/containerd/container_opts_unix.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
|
9
vendor/github.com/containerd/containerd/container_restore_opts.go
generated
vendored
|
@ -18,6 +18,8 @@ package containerd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
|
@ -26,7 +28,6 @@ import (
|
|||
ptypes "github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -46,7 +47,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint
|
|||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
name, ok := index.Annotations[checkpointImageNameLabel]
|
||||
if !ok || name == "" {
|
||||
return ErrRuntimeNameNotFoundInIndex
|
||||
return ErrImageNameNotFoundInIndex
|
||||
}
|
||||
snapshotter, ok := index.Annotations[checkpointSnapshotterNameLabel]
|
||||
if !ok || name == "" {
|
||||
|
@ -92,7 +93,7 @@ func WithRestoreRuntime(ctx context.Context, id string, client *Client, checkpoi
|
|||
store := client.ContentStore()
|
||||
data, err := content.ReadBlob(ctx, store, *m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to read checkpoint runtime")
|
||||
return fmt.Errorf("unable to read checkpoint runtime: %w", err)
|
||||
}
|
||||
if err := proto.Unmarshal(data, &options); err != nil {
|
||||
return err
|
||||
|
@ -117,7 +118,7 @@ func WithRestoreSpec(ctx context.Context, id string, client *Client, checkpoint
|
|||
store := client.ContentStore()
|
||||
data, err := content.ReadBlob(ctx, store, *m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to read checkpoint config")
|
||||
return fmt.Errorf("unable to read checkpoint config: %w", err)
|
||||
}
|
||||
var any ptypes.Any
|
||||
if err := proto.Unmarshal(data, &any); err != nil {
|
||||
|
|
35
vendor/github.com/containerd/containerd/content/helpers.go
generated
vendored
|
@ -18,8 +18,9 @@ package content
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -27,7 +28,6 @@ import (
|
|||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
|
@ -77,7 +77,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
|
|||
cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
|
||||
if err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return errors.Wrap(err, "failed to open writer")
|
||||
return fmt.Errorf("failed to open writer: %w", err)
|
||||
}
|
||||
|
||||
return nil // all ready present
|
||||
|
@ -134,28 +134,28 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
|
|||
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
|
||||
ws, err := cw.Status()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get status")
|
||||
return fmt.Errorf("failed to get status: %w", err)
|
||||
}
|
||||
|
||||
if ws.Offset > 0 {
|
||||
r, err = seekReader(r, ws.Offset, size)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
|
||||
return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
|
||||
}
|
||||
}
|
||||
|
||||
copied, err := copyWithBuffer(cw, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to copy")
|
||||
return fmt.Errorf("failed to copy: %w", err)
|
||||
}
|
||||
if size != 0 && copied < size-ws.Offset {
|
||||
// Short writes would return its own error, this indicates a read failure
|
||||
return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
|
||||
return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
|
||||
}
|
||||
|
||||
if err := cw.Commit(ctx, size, expected, opts...); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
|
||||
return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,11 +172,11 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
|
|||
|
||||
copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to copy")
|
||||
return fmt.Errorf("failed to copy: %w", err)
|
||||
}
|
||||
if copied < n {
|
||||
// Short writes would return its own error, this indicates a read failure
|
||||
return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
|
||||
return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -190,13 +190,13 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
|
|||
func CopyReader(cw Writer, r io.Reader) (int64, error) {
|
||||
ws, err := cw.Status()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to get status")
|
||||
return 0, fmt.Errorf("failed to get status: %w", err)
|
||||
}
|
||||
|
||||
if ws.Offset > 0 {
|
||||
r, err = seekReader(r, ws.Offset, 0)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
|
||||
return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -212,7 +212,10 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
|
|||
if ok {
|
||||
nn, err := seeker.Seek(offset, io.SeekStart)
|
||||
if nn != offset {
|
||||
return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
|
||||
if err == nil {
|
||||
err = fmt.Errorf("unexpected seek location without seek error")
|
||||
}
|
||||
return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -230,12 +233,12 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
|
|||
}
|
||||
|
||||
// well then, let's just discard up to the offset
|
||||
n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
|
||||
n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to discard to offset")
|
||||
return nil, fmt.Errorf("failed to discard to offset: %w", err)
|
||||
}
|
||||
if n != offset {
|
||||
return nil, errors.Errorf("unable to discard to offset")
|
||||
return nil, errors.New("unable to discard to offset")
|
||||
}
|
||||
|
||||
return r, nil
|
||||
|
|
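Note: the helpers.go hunks above rewrite seekReader so that a bad seek is reported even when Seek returns no error, and fall back to discarding bytes (now via io.Discard) when the reader cannot seek. A minimal standalone sketch of that resume pattern, with an invented function name rather than the vendored code:

package main

import (
	"fmt"
	"io"
)

// resumeAt positions r at offset: it seeks when the reader supports it and
// otherwise discards offset bytes from the front of the stream.
func resumeAt(r io.Reader, offset int64) (io.Reader, error) {
	if seeker, ok := r.(io.Seeker); ok {
		n, err := seeker.Seek(offset, io.SeekStart)
		if n != offset {
			if err == nil {
				err = fmt.Errorf("unexpected seek location without seek error")
			}
			return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
		}
		if err != nil {
			return nil, err
		}
		return r, nil
	}
	// Not seekable: throw away the first offset bytes instead.
	n, err := io.Copy(io.Discard, io.LimitReader(r, offset))
	if err != nil {
		return nil, fmt.Errorf("failed to discard to offset: %w", err)
	}
	if n != offset {
		return nil, fmt.Errorf("unable to discard to offset: got %d of %d bytes", n, offset)
	}
	return r, nil
}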
vendor/github.com/containerd/containerd/content/local/locks.go (generated, vendored): 10 changes
@@ -17,11 +17,11 @@
 package local
 
 import (
+	"fmt"
 	"sync"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 // Handles locking references
@@ -41,7 +41,13 @@ func tryLock(ref string) error {
 	defer locksMu.Unlock()
 
 	if v, ok := locks[ref]; ok {
-		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since)
+		// Returning the duration may help developers distinguish dead locks (long duration) from
+		// lock contentions (short duration).
+		now := time.Now()
+		return fmt.Errorf(
+			"ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since,
+			errdefs.ErrUnavailable,
+		)
 	}
 
 	locks[ref] = &lock{time.Now()}
|
|
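Note: the locks.go change above keeps the per-ref lock map but now reports how long the ref has been held, so a long-lived (possibly dead) lock is distinguishable from brief contention. A minimal standalone sketch of the same pattern, using a local sentinel instead of errdefs.ErrUnavailable:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errUnavailable = errors.New("unavailable") // stand-in for errdefs.ErrUnavailable

var (
	locksMu sync.Mutex
	locks   = map[string]time.Time{} // ref -> time the lock was taken
)

func tryLock(ref string) error {
	locksMu.Lock()
	defer locksMu.Unlock()

	if since, ok := locks[ref]; ok {
		// Include the held duration so stuck refs stand out in the error text.
		return fmt.Errorf("ref %s locked for %s (since %s): %w",
			ref, time.Since(since), since, errUnavailable)
	}
	locks[ref] = time.Now()
	return nil
}

func unlock(ref string) {
	locksMu.Lock()
	defer locksMu.Unlock()
	delete(locks, ref)
}

func main() {
	fmt.Println(tryLock("sha256:abc")) // <nil>
	fmt.Println(tryLock("sha256:abc")) // ref sha256:abc locked for ...: unavailable
	unlock("sha256:abc")
}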
vendor/github.com/containerd/containerd/content/local/readerat.go (generated, vendored): 7 changes
|
@ -17,10 +17,9 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
)
|
||||
|
@ -40,7 +39,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
|
||||
return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
fp, err := os.Open(p)
|
||||
|
@ -49,7 +48,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
|
||||
return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return sizeReaderAt{size: fi.Size(), fp: fp}, nil
|
||||
|
|
vendor/github.com/containerd/containerd/content/local/store.go (generated, vendored): 77 changes
|
@ -20,7 +20,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -37,7 +36,6 @@ import (
|
|||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
|
@ -94,13 +92,13 @@ func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
|
|||
func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
|
||||
p, err := s.blobPath(dgst)
|
||||
if err != nil {
|
||||
return content.Info{}, errors.Wrapf(err, "calculating blob info path")
|
||||
return content.Info{}, fmt.Errorf("calculating blob info path: %w", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
|
||||
err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return content.Info{}, err
|
||||
|
@ -129,12 +127,12 @@ func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]strin
|
|||
func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
|
||||
p, err := s.blobPath(desc.Digest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
|
||||
return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err)
|
||||
}
|
||||
|
||||
reader, err := OpenReader(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p)
|
||||
return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err)
|
||||
}
|
||||
|
||||
return reader, nil
|
||||
|
@ -147,7 +145,7 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.
|
|||
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
||||
bp, err := s.blobPath(dgst)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "calculating blob path for delete")
|
||||
return fmt.Errorf("calculating blob path for delete: %w", err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(bp); err != nil {
|
||||
|
@ -155,7 +153,7 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|||
return err
|
||||
}
|
||||
|
||||
return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
|
||||
return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -163,18 +161,18 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|||
|
||||
func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
|
||||
if s.ls == nil {
|
||||
return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
|
||||
return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
p, err := s.blobPath(info.Digest)
|
||||
if err != nil {
|
||||
return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
|
||||
return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
|
||||
err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return content.Info{}, err
|
||||
|
@ -201,7 +199,7 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str
|
|||
all = true
|
||||
labels = info.Labels
|
||||
default:
|
||||
return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
|
||||
return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -378,7 +376,7 @@ func (s *store) status(ingestPath string) (content.Status, error) {
|
|||
fi, err := os.Stat(dp)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
||||
err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
|
||||
}
|
||||
return content.Status{}, err
|
||||
}
|
||||
|
@ -386,19 +384,19 @@ func (s *store) status(ingestPath string) (content.Status, error) {
|
|||
ref, err := readFileString(filepath.Join(ingestPath, "ref"))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
||||
err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
|
||||
}
|
||||
return content.Status{}, err
|
||||
}
|
||||
|
||||
startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
|
||||
if err != nil {
|
||||
return content.Status{}, errors.Wrapf(err, "could not read startedat")
|
||||
return content.Status{}, fmt.Errorf("could not read startedat: %w", err)
|
||||
}
|
||||
|
||||
updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
|
||||
if err != nil {
|
||||
return content.Status{}, errors.Wrapf(err, "could not read updatedat")
|
||||
return content.Status{}, fmt.Errorf("could not read updatedat: %w", err)
|
||||
}
|
||||
|
||||
// because we don't write updatedat on every write, the mod time may
|
||||
|
@ -461,7 +459,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
|
|||
// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
|
||||
// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
|
||||
if wOpts.Ref == "" {
|
||||
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
|
||||
return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
var lockErr error
|
||||
for count := uint64(0); count < 10; count++ {
|
||||
|
@ -495,16 +493,16 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
|
|||
path, _, data := s.ingestPaths(ref)
|
||||
status, err := s.status(path)
|
||||
if err != nil {
|
||||
return status, errors.Wrap(err, "failed reading status of resume write")
|
||||
return status, fmt.Errorf("failed reading status of resume write: %w", err)
|
||||
}
|
||||
if ref != status.Ref {
|
||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
||||
// layout corruption or a hash collision for the ref key.
|
||||
return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
|
||||
return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref)
|
||||
}
|
||||
|
||||
if total > 0 && status.Total > 0 && total != status.Total {
|
||||
return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
||||
return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
|
||||
|
@ -528,10 +526,10 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
|||
if expected != "" {
|
||||
p, err := s.blobPath(expected)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "calculating expected blob path for writer")
|
||||
return nil, fmt.Errorf("calculating expected blob path for writer: %w", err)
|
||||
}
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
|
||||
return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -568,7 +566,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
|||
|
||||
// the ingest is new, we need to setup the target location.
|
||||
// write the ref to a file for later use
|
||||
if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
|
||||
if err := os.WriteFile(refp, []byte(ref), 0666); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -581,7 +579,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
|||
}
|
||||
|
||||
if total > 0 {
|
||||
if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
|
||||
if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -589,11 +587,12 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
|
|||
|
||||
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open data file")
|
||||
return nil, fmt.Errorf("failed to open data file: %w", err)
|
||||
}
|
||||
|
||||
if _, err := fp.Seek(offset, io.SeekStart); err != nil {
|
||||
return nil, errors.Wrap(err, "could not seek to current write offset")
|
||||
fp.Close()
|
||||
return nil, fmt.Errorf("could not seek to current write offset: %w", err)
|
||||
}
|
||||
|
||||
return &writer{
|
||||
|
@ -615,7 +614,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
|
|||
root := s.ingestRoot(ref)
|
||||
if err := os.RemoveAll(root); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
|
||||
return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return err
|
||||
|
@ -626,7 +625,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
|
|||
|
||||
func (s *store) blobPath(dgst digest.Digest) (string, error) {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
|
||||
return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
|
||||
|
@ -656,23 +655,23 @@ func (s *store) ingestPaths(ref string) (string, string, string) {
|
|||
}
|
||||
|
||||
func readFileString(path string) (string, error) {
|
||||
p, err := ioutil.ReadFile(path)
|
||||
p, err := os.ReadFile(path)
|
||||
return string(p), err
|
||||
}
|
||||
|
||||
// readFileTimestamp reads a file with just a timestamp present.
|
||||
func readFileTimestamp(p string) (time.Time, error) {
|
||||
b, err := ioutil.ReadFile(p)
|
||||
b, err := os.ReadFile(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
||||
err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
|
||||
}
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
var t time.Time
|
||||
if err := t.UnmarshalText(b); err != nil {
|
||||
return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
|
||||
return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
|
@ -683,19 +682,23 @@ func writeTimestampFile(p string, t time.Time) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return atomicWrite(p, b, 0666)
|
||||
return writeToCompletion(p, b, 0666)
|
||||
}
|
||||
|
||||
func atomicWrite(path string, data []byte, mode os.FileMode) error {
|
||||
func writeToCompletion(path string, data []byte, mode os.FileMode) error {
|
||||
tmp := fmt.Sprintf("%s.tmp", path)
|
||||
f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create tmp file")
|
||||
return fmt.Errorf("create tmp file: %w", err)
|
||||
}
|
||||
_, err = f.Write(data)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "write atomic data")
|
||||
return fmt.Errorf("write tmp file: %w", err)
|
||||
}
|
||||
return os.Rename(tmp, path)
|
||||
err = os.Rename(tmp, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rename tmp file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
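Note: the store.go hunk above renames atomicWrite to writeToCompletion; the helper writes into a .tmp sibling opened with O_SYNC, closes it, then renames it over the destination so a reader of the path sees either the old content or the complete new content. The new name appears to acknowledge that this is not crash-atomic (a .tmp file can be left behind). A minimal standalone sketch of the pattern, not the vendored code:

package main

import (
	"fmt"
	"os"
)

// writeToCompletion writes data to a temporary sibling of path and renames it
// into place, so readers never observe a partially written file at path.
func writeToCompletion(path string, data []byte, mode os.FileMode) error {
	tmp := path + ".tmp"
	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
	if err != nil {
		return fmt.Errorf("create tmp file: %w", err)
	}
	_, err = f.Write(data)
	f.Close()
	if err != nil {
		return fmt.Errorf("write tmp file: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		return fmt.Errorf("rename tmp file: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(writeToCompletion(os.TempDir()+"/updatedat", []byte("2022-03-08T00:00:00Z"), 0666))
}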
vendor/github.com/containerd/containerd/content/local/store_bsd.go (generated, vendored): 3 changes
@@ -1,3 +1,4 @@
+//go:build darwin || freebsd || netbsd
 // +build darwin freebsd netbsd
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atimespec.Unix())
 	}
 
 	return fi.ModTime()
|
|
vendor/github.com/containerd/containerd/content/local/store_openbsd.go (generated, vendored): 3 changes
@@ -1,3 +1,4 @@
+//go:build openbsd
 // +build openbsd
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
 	}
 
 	return fi.ModTime()
|
|
vendor/github.com/containerd/containerd/content/local/store_unix.go (generated, vendored): 3 changes
@@ -1,3 +1,4 @@
+//go:build linux || solaris
 // +build linux solaris
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
 	}
 
 	return fi.ModTime()
|
|
vendor/github.com/containerd/containerd/content/local/writer.go (generated, vendored): 27 changes
|
@ -18,6 +18,8 @@ package local
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -28,7 +30,6 @@ import (
|
|||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// writer represents a write transaction against the blob store.
|
||||
|
@ -88,30 +89,30 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
|
|||
w.fp = nil
|
||||
|
||||
if fp == nil {
|
||||
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
|
||||
return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
if err := fp.Sync(); err != nil {
|
||||
fp.Close()
|
||||
return errors.Wrap(err, "sync failed")
|
||||
return fmt.Errorf("sync failed: %w", err)
|
||||
}
|
||||
|
||||
fi, err := fp.Stat()
|
||||
closeErr := fp.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "stat on ingest file failed")
|
||||
return fmt.Errorf("stat on ingest file failed: %w", err)
|
||||
}
|
||||
if closeErr != nil {
|
||||
return errors.Wrap(err, "failed to close ingest file")
|
||||
return fmt.Errorf("failed to close ingest file: %w", closeErr)
|
||||
}
|
||||
|
||||
if size > 0 && size != fi.Size() {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
|
||||
return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
dgst := w.digester.Digest()
|
||||
if expected != "" && expected != dgst {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
|
||||
return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -127,9 +128,9 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
|
|||
if _, err := os.Stat(target); err == nil {
|
||||
// collision with the target file!
|
||||
if err := os.RemoveAll(w.path); err != nil {
|
||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
|
||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
|
||||
}
|
||||
return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
|
||||
return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists)
|
||||
}
|
||||
|
||||
if err := os.Rename(ingest, target); err != nil {
|
||||
|
@ -142,17 +143,17 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
|
|||
|
||||
commitTime := time.Now()
|
||||
if err := os.Chtimes(target, commitTime, commitTime); err != nil {
|
||||
log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
|
||||
log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time")
|
||||
}
|
||||
|
||||
// clean up!!
|
||||
if err := os.RemoveAll(w.path); err != nil {
|
||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
|
||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
|
||||
}
|
||||
|
||||
if w.s.ls != nil && base.Labels != nil {
|
||||
if err := w.s.ls.Set(dgst, base.Labels); err != nil {
|
||||
log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
|
||||
log.G(ctx).WithField("digest", dgst).Error("failed to set labels")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -165,7 +166,7 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
|
|||
// NOTE: Windows does not support this operation
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
|
||||
log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
|
||||
log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
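Note: the writer.go Commit path above syncs and stats the ingest file, then refuses to commit if the observed size or digest differs from what the caller expected. A minimal standalone sketch of computing and checking a digest while writing, built on the go-digest module this content store already depends on (the in-memory destination is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

// ingest copies r into dst while hashing it, then verifies size and digest
// the same way a content writer would before committing a blob.
func ingest(dst *bytes.Buffer, r io.Reader, wantSize int64, want digest.Digest) error {
	digester := digest.Canonical.Digester()
	n, err := io.Copy(io.MultiWriter(dst, digester.Hash()), r)
	if err != nil {
		return fmt.Errorf("failed to copy: %w", err)
	}
	if wantSize > 0 && n != wantSize {
		return fmt.Errorf("unexpected commit size %d, expected %d", n, wantSize)
	}
	if got := digester.Digest(); want != "" && got != want {
		return fmt.Errorf("unexpected commit digest %s, expected %s", got, want)
	}
	return nil
}

func main() {
	payload := "hello"
	var buf bytes.Buffer
	err := ingest(&buf, strings.NewReader(payload), int64(len(payload)), digest.FromString(payload))
	fmt.Println(err) // <nil>
}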
vendor/github.com/containerd/containerd/content/proxy/content_writer.go (generated, vendored): 12 changes
|
@ -18,13 +18,13 @@ package proxy
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type remoteWriter struct {
|
||||
|
@ -57,7 +57,7 @@ func (rw *remoteWriter) Status() (content.Status, error) {
|
|||
Action: contentapi.WriteActionStat,
|
||||
})
|
||||
if err != nil {
|
||||
return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
|
||||
return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err))
|
||||
}
|
||||
|
||||
return content.Status{
|
||||
|
@ -82,7 +82,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
|
|||
Data: p,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
|
||||
return 0, fmt.Errorf("failed to send write: %w", errdefs.FromGRPC(err))
|
||||
}
|
||||
|
||||
n = int(resp.Offset - offset)
|
||||
|
@ -119,15 +119,15 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
|
|||
Labels: base.Labels,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
|
||||
return fmt.Errorf("commit failed: %w", errdefs.FromGRPC(err))
|
||||
}
|
||||
|
||||
if size != 0 && resp.Offset != size {
|
||||
return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
|
||||
return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size)
|
||||
}
|
||||
|
||||
if expected != "" && resp.Digest != expected {
|
||||
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
|
||||
return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
|
||||
}
|
||||
|
||||
rw.digest = resp.Digest
|
||||
|
|
vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go (generated, vendored): 12 changes
|
@ -20,13 +20,13 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/oci"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// NvidiaCLI is the path to the Nvidia helper binary
|
||||
|
@ -111,6 +111,7 @@ type config struct {
|
|||
LDConfig string
|
||||
Requirements []string
|
||||
OCIHookPath string
|
||||
NoCgroups bool
|
||||
}
|
||||
|
||||
func (c *config) args() []string {
|
||||
|
@ -137,6 +138,9 @@ func (c *config) args() []string {
|
|||
for _, r := range c.Requirements {
|
||||
args = append(args, fmt.Sprintf("--require=%s", r))
|
||||
}
|
||||
if c.NoCgroups {
|
||||
args = append(args, "--no-cgroups")
|
||||
}
|
||||
args = append(args, "--pid={{pid}}", "{{rootfs}}")
|
||||
return args
|
||||
}
|
||||
|
@ -209,3 +213,9 @@ func WithLookupOCIHookPath(name string) Opts {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNoCgroups passes --no-cgroups option to nvidia-container-cli.
|
||||
func WithNoCgroups(c *config) error {
|
||||
c.NoCgroups = true
|
||||
return nil
|
||||
}
|
||||
|
|
vendor/github.com/containerd/containerd/defaults/defaults_darwin.go (generated, vendored, new file): 37 additions
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = "/var/lib/containerd"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/var/run/containerd"
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/var/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/var/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/var/run/containerd/fifo"
|
||||
// DefaultRuntime would be a multiple of choices, thus empty
|
||||
DefaultRuntime = ""
|
||||
// DefaultConfigDir is the default location for config files.
|
||||
DefaultConfigDir = "/etc/containerd"
|
||||
)
|
vendor/github.com/containerd/containerd/defaults/defaults_unix.go (generated, vendored): 3 changes
@@ -1,4 +1,5 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
 
 /*
    Copyright The containerd Authors.
|
|
vendor/github.com/containerd/containerd/defaults/defaults_windows.go (generated, vendored): 2 changes
@@ -1,5 +1,3 @@
-// +build windows
-
 /*
    Copyright The containerd Authors.
 
|
|
vendor/github.com/containerd/containerd/diff/diff.go (generated, vendored): 15 changes
|
@ -18,6 +18,7 @@ package diff
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/gogo/protobuf/types"
|
||||
|
@ -37,6 +38,12 @@ type Config struct {
|
|||
|
||||
// Labels are the labels to apply to the generated content
|
||||
Labels map[string]string
|
||||
|
||||
// Compressor is a function to compress the diff stream
|
||||
// instead of the default gzip compressor. Differ passes
|
||||
// the MediaType of the target diff content to the compressor.
|
||||
// When using this config, MediaType must be specified as well.
|
||||
Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error)
|
||||
}
|
||||
|
||||
// Opt is used to configure a diff operation
|
||||
|
@ -71,6 +78,14 @@ type Applier interface {
|
|||
Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error)
|
||||
}
|
||||
|
||||
// WithCompressor sets the function to be used to compress the diff stream.
|
||||
func WithCompressor(f func(dest io.Writer, mediaType string) (io.WriteCloser, error)) Opt {
|
||||
return func(c *Config) error {
|
||||
c.Compressor = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMediaType sets the media type to use for creating the diff, without
|
||||
// specifying the differ will choose a default.
|
||||
func WithMediaType(m string) Opt {
|
||||
|
|
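Note: the diff.go hunk above adds WithCompressor so a caller can supply its own compression for the generated layer diff; per the new Config comment, WithMediaType must be set alongside it. A minimal sketch of building the option list, assuming the vendored diff package and the standard library gzip writer (the media type and compression level are illustrative choices):

package main

import (
	"compress/gzip"
	"fmt"
	"io"

	"github.com/containerd/containerd/diff"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	opts := []diff.Opt{
		// MediaType must be specified when a custom Compressor is used.
		diff.WithMediaType(ocispec.MediaTypeImageLayerGzip),
		diff.WithCompressor(func(dest io.Writer, mediaType string) (io.WriteCloser, error) {
			return gzip.NewWriterLevel(dest, gzip.BestSpeed)
		}),
	}

	// The differ applies Opts to a diff.Config; shown here directly for illustration.
	var cfg diff.Config
	for _, o := range opts {
		if err := o(&cfg); err != nil {
			fmt.Println("applying option:", err)
			return
		}
	}
	fmt.Println("media type:", cfg.MediaType, "custom compressor set:", cfg.Compressor != nil)
}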
vendor/github.com/containerd/containerd/diff/stream.go (generated, vendored): 2 changes
|
@ -18,6 +18,7 @@ package diff
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
|
@ -25,7 +26,6 @@ import (
|
|||
"github.com/containerd/containerd/images"
|
||||
"github.com/gogo/protobuf/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
vendor/github.com/containerd/containerd/diff/stream_unix.go (generated, vendored): 5 changes
|
@ -1,3 +1,4 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
|
@ -21,15 +22,15 @@ package diff
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// NewBinaryProcessor returns a binary processor for use with processing content streams
|
||||
|
|
vendor/github.com/containerd/containerd/diff/stream_windows.go (generated, vendored): 9 changes
|
@ -1,5 +1,3 @@
|
|||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
|
@ -21,19 +19,18 @@ package diff
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
const processorPipe = "STREAM_PROCESSOR_PIPE"
|
||||
|
@ -157,7 +154,7 @@ func (c *binaryProcessor) Close() error {
|
|||
}
|
||||
|
||||
func getUiqPath() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
dir, err := os.MkdirTemp("", "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
vendor/github.com/containerd/containerd/errdefs/errors.go (generated, vendored): 5 changes
@@ -17,7 +17,7 @@
 // Package errdefs defines the common errors used throughout containerd
 // packages.
 //
-// Use with errors.Wrap and error.Wrapf to add context to an error.
+// Use with fmt.Errorf to add context to an error.
 //
 // To detect an error class, use the IsXXX functions to tell whether an error
 // is of a certain type.
@@ -28,8 +28,7 @@ package errdefs
 
 import (
 	"context"
-
-	"github.com/pkg/errors"
+	"errors"
 )
 
 // Definitions of common error types used throughout containerd. All containerd
|
|
vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored): 10 changes
|
@ -18,9 +18,9 @@ package errdefs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
@ -68,9 +68,9 @@ func ToGRPC(err error) error {
|
|||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
|
||||
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(errors.Wrapf(err, format, args...))
|
||||
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
|
@ -104,9 +104,9 @@ func FromGRPC(err error) error {
|
|||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = errors.Wrap(cls, msg)
|
||||
err = fmt.Errorf("%s: %w", msg, cls)
|
||||
} else {
|
||||
err = errors.WithStack(cls)
|
||||
err = cls
|
||||
}
|
||||
|
||||
return err
|
||||
|
|
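Note: the pkg/errors to fmt.Errorf("...: %w", err) migration that runs through the errdefs hunks above keeps the sentinel errors classifiable: %w wrapping is exactly what errors.Is, and therefore the errdefs.IsXXX helpers and the gRPC mapping, unwrap. A minimal standalone sketch of that behaviour, assuming the vendored errdefs package:

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func lookup(ref string) error {
	// Wrap the sentinel with %w so callers can still classify the failure.
	return fmt.Errorf("content %q: %w", ref, errdefs.ErrNotFound)
}

func main() {
	err := lookup("sha256:deadbeef")
	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
	fmt.Println(errdefs.ToGRPC(err))                 // mapped to a NotFound gRPC status
}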
vendor/github.com/containerd/containerd/events/exchange/exchange.go (generated, vendored): 24 changes
|
@ -18,6 +18,7 @@ package exchange
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -30,7 +31,6 @@ import (
|
|||
"github.com/containerd/typeurl"
|
||||
goevents "github.com/docker/go-events"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -88,10 +88,10 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event
|
|||
|
||||
namespace, err = namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed publishing event")
|
||||
return fmt.Errorf("failed publishing event: %w", err)
|
||||
}
|
||||
if err := validateTopic(topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", topic)
|
||||
return fmt.Errorf("envelope topic %q: %w", topic, err)
|
||||
}
|
||||
|
||||
encoded, err = typeurl.MarshalAny(event)
|
||||
|
@ -150,7 +150,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
|
|||
if len(fs) > 0 {
|
||||
filter, err := filters.ParseAll(fs...)
|
||||
if err != nil {
|
||||
errq <- errors.Wrapf(err, "failed parsing subscription filters")
|
||||
errq <- fmt.Errorf("failed parsing subscription filters: %w", err)
|
||||
closeAll()
|
||||
return
|
||||
}
|
||||
|
@ -175,7 +175,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
|
|||
// TODO(stevvooe): For the most part, we are well protected
|
||||
// from this condition. Both Forward and Publish protect
|
||||
// from this.
|
||||
err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
|
||||
err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev)
|
||||
break
|
||||
}
|
||||
|
||||
|
@ -203,21 +203,21 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
|
|||
|
||||
func validateTopic(topic string) error {
|
||||
if topic == "" {
|
||||
return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
|
||||
return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if topic[0] != '/' {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
|
||||
return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if len(topic) == 1 {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
|
||||
return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
components := strings.Split(topic[1:], "/")
|
||||
for _, component := range components {
|
||||
if err := identifiers.Validate(component); err != nil {
|
||||
return errors.Wrapf(err, "failed validation on component %q", component)
|
||||
return fmt.Errorf("failed validation on component %q: %w", component, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -226,15 +226,15 @@ func validateTopic(topic string) error {
|
|||
|
||||
func validateEnvelope(envelope *events.Envelope) error {
|
||||
if err := identifiers.Validate(envelope.Namespace); err != nil {
|
||||
return errors.Wrapf(err, "event envelope has invalid namespace")
|
||||
return fmt.Errorf("event envelope has invalid namespace: %w", err)
|
||||
}
|
||||
|
||||
if err := validateTopic(envelope.Topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
|
||||
return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err)
|
||||
}
|
||||
|
||||
if envelope.Timestamp.IsZero() {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
|
||||
return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
9
vendor/github.com/containerd/containerd/filters/parser.go
generated
vendored
9
vendor/github.com/containerd/containerd/filters/parser.go
generated
vendored
|
@ -21,7 +21,6 @@ import (
|
|||
"io"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -71,7 +70,7 @@ func ParseAll(ss ...string) (Filter, error) {
|
|||
for _, s := range ss {
|
||||
f, err := Parse(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
|
||||
return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
|
@ -90,7 +89,7 @@ func (p *parser) parse() (Filter, error) {
|
|||
|
||||
ss, err := p.selectors()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filters")
|
||||
return nil, fmt.Errorf("filters: %w", err)
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
|
@ -284,9 +283,9 @@ func (pe parseError) Error() string {
|
|||
}
|
||||
|
||||
func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
|
||||
return errors.Wrap(parseError{
|
||||
return fmt.Errorf("parse error: %w", parseError{
|
||||
input: p.input,
|
||||
pos: pos,
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
}, "parse error")
|
||||
})
|
||||
}
|
||||
|
|
vendor/github.com/containerd/containerd/filters/quote.go (generated, vendored): 3 changes
@@ -17,9 +17,8 @@
 package filters
 
 import (
+	"errors"
 	"unicode/utf8"
-
-	"github.com/pkg/errors"
 )
 
 // NOTE(stevvooe): Most of this code in this file is copied from the stdlib
|
|
vendor/github.com/containerd/containerd/gc/gc.go (generated, vendored): 2 changes
@@ -59,6 +59,8 @@ type Stats interface {
 //
 // We can probably use this to inform a design for incremental GC by injecting
 // callbacks to the set modification algorithms.
+//
+// https://en.wikipedia.org/wiki/Tracing_garbage_collection#Tri-color_marking
 func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) {
 	var (
 		grays []Node // maintain a gray "stack"
|
|
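Note: the comment added above points Tricolor at the classic tri-color marking scheme. As a reading aid, here is a small self-contained sketch of tri-color reachability over an in-memory graph; it illustrates the algorithm only and is not the vendored implementation (types and names are invented):

package main

import "fmt"

type node string

// tricolor returns the set of nodes reachable from roots.
// White = unseen, gray = discovered but not yet scanned, black = scanned.
func tricolor(roots []node, refs func(node) []node) map[node]struct{} {
	grays := append([]node(nil), roots...) // gray "stack"
	seen := map[node]struct{}{}            // gray or black
	black := map[node]struct{}{}           // fully scanned, i.e. reachable
	for _, r := range roots {
		seen[r] = struct{}{}
	}
	for len(grays) > 0 {
		n := grays[len(grays)-1]
		grays = grays[:len(grays)-1]
		for _, ref := range refs(n) {
			if _, ok := seen[ref]; !ok {
				seen[ref] = struct{}{}
				grays = append(grays, ref)
			}
		}
		black[n] = struct{}{} // blacken after scanning
	}
	return black
}

func main() {
	graph := map[node][]node{"a": {"b"}, "b": {"c"}, "d": {"a"}}
	live := tricolor([]node{"a"}, func(n node) []node { return graph[n] })
	fmt.Println(len(live)) // 3: a, b and c are reachable; d would be collected
}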
vendor/github.com/containerd/containerd/identifiers/validate.go (generated, vendored): 8 changes
|
@ -25,10 +25,10 @@
|
|||
package identifiers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -51,15 +51,15 @@ var (
|
|||
// In general identifiers that pass this validation should be safe for use as filesystem path components.
|
||||
func Validate(s string) error {
|
||||
if len(s) == 0 {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
|
||||
return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if len(s) > maxLength {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
|
||||
return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if !identifierRe.MatchString(s) {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
|
||||
return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
vendor/github.com/containerd/containerd/image.go (generated, vendored): 12 changes
|
@ -19,6 +19,7 @@ package containerd
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
@ -33,7 +34,6 @@ import (
|
|||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
|
@ -61,6 +61,8 @@ type Image interface {
|
|||
ContentStore() content.Store
|
||||
// Metadata returns the underlying image metadata
|
||||
Metadata() images.Image
|
||||
// Platform returns the platform match comparer. Can be nil.
|
||||
Platform() platforms.MatchComparer
|
||||
}
|
||||
|
||||
type usageOptions struct {
|
||||
|
@ -397,10 +399,10 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer,
|
|||
cs := i.ContentStore()
|
||||
diffIDs, err := i.i.RootFS(ctx, cs, platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to resolve rootfs")
|
||||
return nil, fmt.Errorf("failed to resolve rootfs: %w", err)
|
||||
}
|
||||
if len(diffIDs) != len(manifest.Layers) {
|
||||
return nil, errors.Errorf("mismatched image rootfs and manifest layers")
|
||||
return nil, errors.New("mismatched image rootfs and manifest layers")
|
||||
}
|
||||
layers := make([]rootfs.Layer, len(diffIDs))
|
||||
for i := range diffIDs {
|
||||
|
@ -448,3 +450,7 @@ func (i *image) checkSnapshotterSupport(ctx context.Context, snapshotterName str
|
|||
func (i *image) ContentStore() content.Store {
|
||||
return i.client.ContentStore()
|
||||
}
|
||||
|
||||
func (i *image) Platform() platforms.MatchComparer {
|
||||
return i.platform
|
||||
}
|
||||
|
|
vendor/github.com/containerd/containerd/images/archive/exporter.go (generated, vendored): 20 changes
|
@ -20,6 +20,7 @@ import (
|
|||
"archive/tar"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
|
@ -31,7 +32,6 @@ import (
|
|||
digest "github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type exportOptions struct {
|
||||
|
@ -230,7 +230,7 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
|
|||
manifest: manifests[0],
|
||||
}
|
||||
} else if eo.platform != nil {
|
||||
return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform")
|
||||
return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound)
|
||||
}
|
||||
}
|
||||
resolvedIndex[desc.Digest] = d
|
||||
|
@ -243,14 +243,14 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
|
|||
|
||||
}
|
||||
default:
|
||||
return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported")
|
||||
return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
}
|
||||
|
||||
if len(dManifests) > 0 {
|
||||
tr, err := manifestsRecord(ctx, store, dManifests)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to create manifests file")
|
||||
return fmt.Errorf("unable to create manifests file: %w", err)
|
||||
}
|
||||
|
||||
records = append(records, tr)
|
||||
|
@ -316,7 +316,7 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp
|
|||
CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
|
||||
r, err := cs.ReaderAt(ctx, desc)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to get reader")
|
||||
return 0, fmt.Errorf("failed to get reader: %w", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
|
@ -325,10 +325,10 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp
|
|||
|
||||
n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to copy to tar")
|
||||
return 0, fmt.Errorf("failed to copy to tar: %w", err)
|
||||
}
|
||||
if dgstr.Digest() != desc.Digest {
|
||||
return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
|
||||
return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest())
|
||||
}
|
||||
return n, nil
|
||||
},
|
||||
|
@ -424,7 +424,7 @@ func manifestsRecord(ctx context.Context, store content.Provider, manifests map[
|
|||
return tarRecord{}, err
|
||||
}
|
||||
if err := manifest.Config.Digest.Validate(); err != nil {
|
||||
return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest)
|
||||
return tarRecord{}, fmt.Errorf("invalid manifest %q: %w", m.manifest.Digest, err)
|
||||
}
|
||||
|
||||
dgst := manifest.Config.Digest
|
||||
|
@ -491,10 +491,10 @@ func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord)
|
|||
return err
|
||||
}
|
||||
if n != record.Header.Size {
|
||||
return errors.Errorf("unexpected copy size for %s", record.Header.Name)
|
||||
return fmt.Errorf("unexpected copy size for %s", record.Header.Name)
|
||||
}
|
||||
} else if record.Header.Size > 0 {
|
||||
return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
|
||||
return fmt.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
vendor/github.com/containerd/containerd/images/archive/importer.go (generated, vendored): 96 changes
|
@ -22,9 +22,9 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
|
||||
"github.com/containerd/containerd/archive/compression"
|
||||
|
@ -32,10 +32,10 @@ import (
|
|||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type importOpts struct {
|
||||
|
@ -104,16 +104,16 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
|
|||
hdrName := path.Clean(hdr.Name)
|
||||
if hdrName == ocispec.ImageLayoutFile {
|
||||
if err = onUntarJSON(tr, &ociLayout); err != nil {
|
||||
return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name)
|
||||
return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err)
|
||||
}
|
||||
} else if hdrName == "manifest.json" {
|
||||
if err = onUntarJSON(tr, &mfsts); err != nil {
|
||||
return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name)
|
||||
return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err)
|
||||
}
|
||||
} else {
|
||||
dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name)
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err)
|
||||
}
|
||||
|
||||
blobs[hdrName] = ocispec.Descriptor{
|
||||
|
@ -128,12 +128,12 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
|
|||
// as Docker v1.1 or v1.2.
|
||||
if ociLayout.Version != "" {
|
||||
if ociLayout.Version != ocispec.ImageLayoutVersion {
|
||||
return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version)
|
||||
return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version)
|
||||
}
|
||||
|
||||
idx, ok := blobs["index.json"]
|
||||
if !ok {
|
||||
return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
|
||||
return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
|
||||
}
|
||||
|
||||
idx.MediaType = ocispec.MediaTypeImageIndex
|
||||
|
@ -141,13 +141,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
|
|||
}
|
||||
|
||||
if mfsts == nil {
|
||||
		return ocispec.Descriptor{}, errors.Errorf("unrecognized image format")
		return ocispec.Descriptor{}, errors.New("unrecognized image format")
	}

	for name, linkname := range symlinks {
		desc, ok := blobs[linkname]
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname)
			return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname)
		}
		blobs[name] = desc
	}

@@ -160,13 +160,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
	for _, mfst := range mfsts {
		config, ok := blobs[mfst.Config]
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
			return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config)
		}
		config.MediaType = images.MediaTypeDockerSchema2Config

		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
			return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err)
		}

		manifest := struct {

@@ -183,18 +183,28 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt

		desc, err := writeManifest(ctx, store, manifest, manifest.MediaType)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest")
			return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err)
		}

		platforms, err := images.Platforms(ctx, store, desc)
		imgPlatforms, err := images.Platforms(ctx, store, desc)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform")
			return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err)
		}
		if len(platforms) > 0 {
		if len(imgPlatforms) > 0 {
			// Only one platform can be resolved from non-index manifest,
			// The platform can only come from the config included above,
			// if the config has no platform it can be safely omitted.
			desc.Platform = &platforms[0]
			desc.Platform = &imgPlatforms[0]

			// If the image we've just imported is a Windows image without the OSVersion set,
			// we could just assume it matches this host's OS Version. Without this, the
			// children labels might not be set on the image content, leading to it being
			// garbage collected, breaking the image.
			// See: https://github.com/containerd/containerd/issues/5690
			if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" {
				platform := platforms.DefaultSpec()
				desc.Platform.OSVersion = platform.OSVersion
			}
		}

		if len(mfst.RepoTags) == 0 {

@@ -223,7 +233,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
	}

func onUntarJSON(r io.Reader, j interface{}) error {
	b, err := ioutil.ReadAll(r)
	b, err := io.ReadAll(r)
	if err != nil {
		return err
	}

@@ -247,7 +257,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
	for i, f := range layerFiles {
		desc, ok := blobs[f]
		if !ok {
			return nil, errors.Errorf("layer %q not found", f)
			return nil, fmt.Errorf("layer %q not found", f)
		}
		layers[i] = desc
		descs[desc.Digest] = &layers[i]

@@ -259,15 +269,19 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
		if ok {
			desc := descs[digest.Digest(dgst)]
			if desc != nil {
				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
				desc.Digest = info.Digest
				desc.Size = info.Size
				mediaType, err := detectLayerMediaType(ctx, store, *desc)
				if err != nil {
					return fmt.Errorf("failed to detect media type of layer: %w", err)
				}
				desc.MediaType = mediaType
			}
		}
		return nil
	}, filters...)
	if err != nil {
		return nil, errors.Wrap(err, "failure checking for compressed blobs")
		return nil, fmt.Errorf("failure checking for compressed blobs: %w", err)
	}

	for i, desc := range layers {

@@ -277,11 +291,12 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
		// Open blob, resolve media type
		ra, err := store.ReaderAt(ctx, desc)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
			return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err)
		}
		s, err := compression.DecompressStream(content.NewReader(ra))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
			ra.Close()
			return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err)
		}
		if s.GetCompression() == compression.Uncompressed {
			if compress {

@@ -292,6 +307,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
				if err != nil {
					s.Close()
					ra.Close()
					return nil, err
				}
				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip

@@ -302,7 +318,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
		}
		s.Close()

		ra.Close()
	}
	return layers, nil
}

@@ -310,7 +326,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
		return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err)
	}

	defer func() {

@@ -320,7 +336,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string
		}
	}()
	if err := w.Truncate(0); err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
		return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err)
	}

	cw, err := compression.CompressStream(w, compression.Gzip)

@@ -337,7 +353,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string

	cst, err := w.Status()
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
		return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err)
	}

	desc.Digest = w.Digest()

@@ -345,7 +361,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string

	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
			return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err)
		}
	}

@@ -369,3 +385,29 @@ func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{

	return desc, nil
}

func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) {
	var mediaType string
	// need to parse existing blob to use the proper media type
	bytes := make([]byte, 10)
	ra, err := store.ReaderAt(ctx, desc)
	if err != nil {
		return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err)
	}
	defer ra.Close()
	_, err = ra.ReadAt(bytes, 0)
	if err != nil && err != io.EOF {
		return "", fmt.Errorf("failed to read header bytes from layer to detect media type: %w", err)
	}
	if err == io.EOF {
		// in the case of an empty layer then the media type should be uncompressed
		return images.MediaTypeDockerSchema2Layer, nil
	}
	switch c := compression.DetectCompression(bytes); c {
	case compression.Uncompressed:
		mediaType = images.MediaTypeDockerSchema2Layer
	default:
		mediaType = images.MediaTypeDockerSchema2LayerGzip
	}
	return mediaType, nil
}
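Editor's note: the new detectLayerMediaType helper above decides between the uncompressed and gzip Docker layer media types by sniffing the first bytes of the blob with compression.DetectCompression. A minimal sketch of that detection in isolation (the sample byte slices below are made up for illustration, not taken from this PR):

package main

import (
	"fmt"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Gzip streams start with the magic bytes 0x1f 0x8b; DetectCompression
	// treats anything it does not recognize as an uncompressed layer.
	gzipHeader := []byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	plainHeader := []byte("not a gzip")

	fmt.Println(compression.DetectCompression(gzipHeader) == compression.Gzip)          // true
	fmt.Println(compression.DetectCompression(plainHeader) == compression.Uncompressed) // true
}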
6	vendor/github.com/containerd/containerd/images/archive/reference.go generated vendored

@@ -17,12 +17,12 @@
package archive

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/reference"
	distref "github.com/containerd/containerd/reference/docker"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// FilterRefPrefix restricts references to having the given image

@@ -72,7 +72,7 @@ func normalizeReference(ref string) (string, error) {
	// TODO: Replace this function to not depend on reference package
	normalized, err := distref.ParseDockerRef(ref)
	if err != nil {
		return "", errors.Wrapf(err, "normalize image ref %q", ref)
		return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
	}

	return normalized.String(), nil

@@ -81,7 +81,7 @@ func normalizeReference(ref string) (string, error) {
func familiarizeReference(ref string) (string, error) {
	named, err := distref.ParseNormalizedNamed(ref)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse %q", ref)
		return "", fmt.Errorf("failed to parse %q: %w", ref, err)
	}
	named = distref.TagNameOnly(named)
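Editor's note: only the error wrapping changes here; the normalization itself still goes through distref.ParseDockerRef. A minimal sketch of what that call does with a short image name, assuming the vendored reference/docker package (the "ubuntu" name is just an example):

package main

import (
	"fmt"
	"log"

	distref "github.com/containerd/containerd/reference/docker"
)

func main() {
	// Short names are expanded to a fully qualified, tagged reference.
	named, err := distref.ParseDockerRef("ubuntu")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu:latest
}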
42	vendor/github.com/containerd/containerd/images/handlers.go generated vendored

@@ -18,6 +18,7 @@ package images

import (
	"context"
	"errors"
	"fmt"
	"sort"

@@ -25,7 +26,6 @@ import (
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

@@ -33,13 +33,17 @@
var (
	// ErrSkipDesc is used to skip processing of a descriptor and
	// its descendants.
	ErrSkipDesc = fmt.Errorf("skip descriptor")
	ErrSkipDesc = errors.New("skip descriptor")

	// ErrStopHandler is used to signify that the descriptor
	// has been handled and should not be handled further.
	// This applies only to a single descriptor in a handler
	// chain and does not apply to descendant descriptors.
	ErrStopHandler = fmt.Errorf("stop handler")
	ErrStopHandler = errors.New("stop handler")

	// ErrEmptyWalk is used when the WalkNotEmpty handlers return no
	// children (e.g.: they were filtered out).
	ErrEmptyWalk = errors.New("image might be filtered out")
)

// Handler handles image manifests

@@ -99,6 +103,36 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
			}
		}
	}
	return nil
}

// WalkNotEmpty works the same way Walk does, with the exception that it ensures that
// some children are still found by Walking the descriptors (for example, not all of
// them have been filtered out by one of the handlers). If there are no children,
// then an ErrEmptyWalk error is returned.
func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
	isEmpty := true
	var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		children, err := handler.Handle(ctx, desc)
		if err != nil {
			return children, err
		}

		if len(children) > 0 {
			isEmpty = false
		}

		return children, nil
	}

	err := Walk(ctx, notEmptyHandler, descs...)
	if err != nil {
		return err
	}

	if isEmpty {
		return ErrEmptyWalk
	}

	return nil
}

@@ -274,7 +308,7 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc

		if n > 0 {
			if len(children) == 0 {
				return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest")
				return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound)
			}
			if len(children) > n {
				children = children[:n]
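Editor's note: WalkNotEmpty is what Client.Import switches to later in this diff, so an import whose content is entirely filtered out (for example by a platform matcher) fails instead of silently producing an empty image. A minimal sketch of the behaviour with a handler that filters everything out (the handler and root descriptor here are hypothetical stand-ins):

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A handler that never returns children, as if every child had been filtered out.
	var dropAll images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		return nil, nil
	}
	root := ocispec.Descriptor{MediaType: ocispec.MediaTypeImageManifest}

	err := images.WalkNotEmpty(context.Background(), dropAll, root)
	fmt.Println(errors.Is(err, images.ErrEmptyWalk)) // true
}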
21	vendor/github.com/containerd/containerd/images/image.go generated vendored

@@ -29,7 +29,6 @@ import (
	"github.com/containerd/containerd/platforms"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// Image provides the model for how containerd views container images.

@@ -115,7 +114,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
	var size int64
	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.Size < 0 {
			return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
			return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
		}
		size += desc.Size
		return nil, nil

@@ -156,7 +155,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
		}

		if err := validateMediaType(p, desc.MediaType); err != nil {
			return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
			return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
		}

		var manifest ocispec.Manifest

@@ -200,7 +199,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
		}

		if err := validateMediaType(p, desc.MediaType); err != nil {
			return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
			return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
		}

		var idx ocispec.Index

@@ -236,15 +235,15 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
			}
			return descs, nil
		}
		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
		return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
	}), image); err != nil {
		return ocispec.Manifest{}, err
	}

	if len(m) == 0 {
		err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
		err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
		if wasIndex {
			err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest)
			err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
		}
		return ocispec.Manifest{}, err
	}

@@ -309,7 +308,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
		}

		return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
		return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
	}

	// TODO(stevvooe): It is possible that referenced conponents could have

@@ -324,7 +323,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
				missing = append(missing, desc)
				continue
			} else {
				return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
				return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
			}
		}
		ra.Close()

@@ -346,7 +345,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
		}

		if err := validateMediaType(p, desc.MediaType); err != nil {
			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
		}

		// TODO(stevvooe): We just assume oci manifest, for now. There may be

@@ -365,7 +364,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
		}

		if err := validateMediaType(p, desc.MediaType); err != nil {
			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
		}

		var index ocispec.Index
4	vendor/github.com/containerd/containerd/images/mediatypes.go generated vendored

@@ -18,12 +18,12 @@ package images

import (
	"context"
	"fmt"
	"sort"
	"strings"

	"github.com/containerd/containerd/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// mediatype definitions for image components handled in containerd.

@@ -87,7 +87,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
		}
		return "", nil
	default:
		return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType)
		return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented)
	}
}
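Editor's note: DiffCompression keeps the same behaviour; only the error now wraps errdefs.ErrNotImplemented via %w, so callers can keep matching on the sentinel. A small sketch under that assumption (the unknown media type string is made up):

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
)

func main() {
	ctx := context.Background()

	// A known Docker layer type still reports its diff compression.
	c, _ := images.DiffCompression(ctx, images.MediaTypeDockerSchema2LayerGzip)
	fmt.Printf("%q\n", c) // "gzip"

	// Unknown media types wrap errdefs.ErrNotImplemented, so sentinel checks keep working.
	_, err := images.DiffCompression(ctx, "application/x-unknown")
	fmt.Println(errors.Is(err, errdefs.ErrNotImplemented)) // true
}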
46	vendor/github.com/containerd/containerd/import.go generated vendored

@@ -31,11 +31,13 @@ import (
)

type importOpts struct {
	indexName    string
	imageRefT    func(string) string
	dgstRefT     func(digest.Digest) string
	allPlatforms bool
	compress     bool
	indexName       string
	imageRefT       func(string) string
	dgstRefT        func(digest.Digest) string
	skipDgstRef     func(string) bool
	allPlatforms    bool
	platformMatcher platforms.MatchComparer
	compress        bool
}

// ImportOpt allows the caller to specify import specific options

@@ -59,6 +61,17 @@ func WithDigestRef(f func(digest.Digest) string) ImportOpt {
	}
}

// WithSkipDigestRef is used to specify when to skip applying
// WithDigestRef. The callback receives an image reference (or an empty
// string if not specified in the image). When the callback returns true,
// the skip occurs.
func WithSkipDigestRef(f func(string) bool) ImportOpt {
	return func(c *importOpts) error {
		c.skipDgstRef = f
		return nil
	}
}

// WithIndexName creates a tag pointing to the imported index
func WithIndexName(name string) ImportOpt {
	return func(c *importOpts) error {

@@ -75,6 +88,14 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
	}
}

// WithImportPlatform is used to import content for specific platform.
func WithImportPlatform(platformMacher platforms.MatchComparer) ImportOpt {
	return func(c *importOpts) error {
		c.platformMatcher = platformMacher
		return nil
	}
}

// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {

@@ -123,9 +144,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
			Target: index,
		})
	}
	var platformMatcher = platforms.All
	if !iopts.allPlatforms {
		platformMatcher = c.platform
	var platformMatcher = c.platform
	if iopts.allPlatforms {
		platformMatcher = platforms.All
	} else if iopts.platformMatcher != nil {
		platformMatcher = iopts.platformMatcher
	}

	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {

@@ -152,6 +175,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
				Target: m,
			})
		}
		if iopts.skipDgstRef != nil {
			if iopts.skipDgstRef(name) {
				continue
			}
		}
		if iopts.dgstRefT != nil {
			ref := iopts.dgstRefT(m.Digest)
			if ref != "" {

@@ -168,7 +196,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt

	handler = images.FilterPlatforms(handler, platformMatcher)
	handler = images.SetChildrenLabels(cs, handler)
	if err := images.Walk(ctx, handler, index); err != nil {
	if err := images.WalkNotEmpty(ctx, handler, index); err != nil {
		return nil, err
	}
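Editor's note: taken together, the new WithImportPlatform and WithSkipDigestRef options let a caller import a tarball for a single platform and suppress the extra digest reference. A hedged usage sketch against the containerd client; the socket path, namespace, and tarball name are assumptions, not part of this PR:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	f, err := os.Open("image.tar") // hypothetical exported image tarball
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	imgs, err := client.Import(ctx, f,
		// Import only the linux/amd64 content instead of every platform in the index.
		containerd.WithImportPlatform(platforms.Only(platforms.MustParse("linux/amd64"))),
		// Skip the digest reference whenever the image already carries a name.
		containerd.WithSkipDigestRef(func(name string) bool { return name != "" }),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Println("imported:", img.Name)
	}
}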
8	vendor/github.com/containerd/containerd/install.go generated vendored

@@ -19,6 +19,8 @@ package containerd
import (
	"archive/tar"
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"

@@ -28,7 +30,6 @@ import (
	"github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/pkg/errors"
)

// Install a binary image into the opt service

@@ -66,6 +67,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
		cr := content.NewReader(ra)
		r, err := compression.DecompressStream(cr)
		if err != nil {
			ra.Close()
			return err
		}
		if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) {

@@ -81,15 +83,17 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
			}
			if result && !config.Replace {
				if _, err := os.Lstat(filepath.Join(path, hdr.Name)); err == nil {
					return false, errors.Errorf("cannot replace %s in %s", hdr.Name, path)
					return false, fmt.Errorf("cannot replace %s in %s", hdr.Name, path)
				}
			}
			return result, nil
		})); err != nil {
			r.Close()
			ra.Close()
			return err
		}
		r.Close()
		ra.Close()
	}
	return nil
}
5	vendor/github.com/containerd/containerd/labels/validate.go generated vendored

@@ -17,8 +17,9 @@
package labels

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

const (

@@ -31,7 +32,7 @@ func Validate(k, v string) error {
		if len(k) > 10 {
			k = k[:10]
		}
		return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
		return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument)
	}
	return nil
}
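Editor's note: the pattern repeated throughout this vendor bump is replacing github.com/pkg/errors wrapping with stdlib fmt.Errorf and the %w verb; errdefs sentinels remain matchable either way. A minimal sketch (the message text and key below are illustrative, not from the source):

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Old style: errors.Wrapf(errdefs.ErrInvalidArgument, "label key ... %s", k)
	// New style below; the sentinel stays reachable through the %w chain.
	err := fmt.Errorf("label key and value greater than maximum size (4096 bytes), key: %s: %w",
		"com.example.too-long", errdefs.ErrInvalidArgument)

	fmt.Println(errors.Is(err, errdefs.ErrInvalidArgument)) // true
	fmt.Println(errdefs.IsInvalidArgument(err))             // true
}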
Some files were not shown because too many files have changed in this diff.