
Merge pull request #43515 from corhere/swarmkit-v2

Bump swarmkit to v2
Sebastiaan van Stijn · 3 years ago
parent · commit 647aede6ad
100 changed files with 1892 additions and 422 deletions
1. cmd/dockerd/daemon.go (+1 -1)
2. container/container.go (+1 -1)
3. daemon/cluster/cluster.go (+2 -2)
4. daemon/cluster/configs.go (+1 -1)
5. daemon/cluster/controllers/plugin/controller.go (+1 -1)
6. daemon/cluster/convert/config.go (+1 -1)
7. daemon/cluster/convert/container.go (+1 -1)
8. daemon/cluster/convert/network.go (+1 -1)
9. daemon/cluster/convert/network_test.go (+1 -1)
10. daemon/cluster/convert/node.go (+1 -1)
11. daemon/cluster/convert/secret.go (+1 -1)
12. daemon/cluster/convert/service.go (+2 -2)
13. daemon/cluster/convert/service_test.go (+1 -1)
14. daemon/cluster/convert/swarm.go (+2 -2)
15. daemon/cluster/convert/task.go (+1 -1)
16. daemon/cluster/executor/backend.go (+1 -1)
17. daemon/cluster/executor/container/adapter.go (+3 -3)
18. daemon/cluster/executor/container/adapter_test.go (+1 -1)
19. daemon/cluster/executor/container/attachment.go (+2 -2)
20. daemon/cluster/executor/container/container.go (+4 -4)
21. daemon/cluster/executor/container/container_test.go (+1 -1)
22. daemon/cluster/executor/container/controller.go (+3 -3)
23. daemon/cluster/executor/container/executor.go (+6 -6)
24. daemon/cluster/executor/container/health_test.go (+1 -1)
25. daemon/cluster/executor/container/validate.go (+1 -1)
26. daemon/cluster/executor/container/validate_test.go (+1 -1)
27. daemon/cluster/executor/container/validate_windows_test.go (+1 -1)
28. daemon/cluster/filters.go (+1 -1)
29. daemon/cluster/helpers.go (+1 -1)
30. daemon/cluster/networks.go (+1 -1)
31. daemon/cluster/noderunner.go (+3 -3)
32. daemon/cluster/nodes.go (+1 -1)
33. daemon/cluster/secrets.go (+1 -1)
34. daemon/cluster/services.go (+1 -1)
35. daemon/cluster/swarm.go (+3 -3)
36. daemon/cluster/tasks.go (+1 -1)
37. daemon/config/opts.go (+1 -1)
38. daemon/dependency.go (+1 -1)
39. daemon/events.go (+1 -1)
40. daemon/oci_windows_test.go (+2 -2)
41. integration-cli/docker_api_swarm_test.go (+1 -1)
42. integration-cli/docker_cli_swarm_test.go (+1 -1)
43. vendor.mod (+13 -8)
44. vendor.sum (+15 -12)
45. vendor/cloud.google.com/go/doc.go (+81 -3)
46. vendor/github.com/hashicorp/go-memdb/.gitignore (+2 -0)
47. vendor/github.com/hashicorp/go-memdb/README.md (+80 -27)
48. vendor/github.com/hashicorp/go-memdb/changes.go (+34 -0)
49. vendor/github.com/hashicorp/go-memdb/filter.go (+38 -0)
50. vendor/github.com/hashicorp/go-memdb/index.go (+501 -21)
51. vendor/github.com/hashicorp/go-memdb/memdb.go (+37 -18)
52. vendor/github.com/hashicorp/go-memdb/schema.go (+42 -13)
53. vendor/github.com/hashicorp/go-memdb/txn.go (+494 -34)
54. vendor/github.com/hashicorp/go-memdb/watch.go (+144 -0)
55. vendor/github.com/hashicorp/go-memdb/watch_few.go (+117 -0)
56. vendor/github.com/ishidawataru/sctp/sctp.go (+8 -0)
57. vendor/github.com/moby/swarmkit/v2/LICENSE (+0 -0)
58. vendor/github.com/moby/swarmkit/v2/agent/agent.go (+3 -3)
59. vendor/github.com/moby/swarmkit/v2/agent/config.go (+3 -3)
60. vendor/github.com/moby/swarmkit/v2/agent/configs/configs.go (+2 -2)
61. vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go (+1 -1)
62. vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/plugin.go (+2 -2)
63. vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go (+5 -5)
64. vendor/github.com/moby/swarmkit/v2/agent/dependency.go (+5 -5)
65. vendor/github.com/moby/swarmkit/v2/agent/errors.go (+0 -0)
66. vendor/github.com/moby/swarmkit/v2/agent/exec/controller.go (+4 -4)
67. vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go (+1 -1)
68. vendor/github.com/moby/swarmkit/v2/agent/exec/errors.go (+0 -0)
69. vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go (+1 -1)
70. vendor/github.com/moby/swarmkit/v2/agent/helpers.go (+0 -0)
71. vendor/github.com/moby/swarmkit/v2/agent/reporter.go (+2 -2)
72. vendor/github.com/moby/swarmkit/v2/agent/resource.go (+1 -1)
73. vendor/github.com/moby/swarmkit/v2/agent/secrets/secrets.go (+3 -3)
74. vendor/github.com/moby/swarmkit/v2/agent/session.go (+3 -3)
75. vendor/github.com/moby/swarmkit/v2/agent/storage.go (+1 -1)
76. vendor/github.com/moby/swarmkit/v2/agent/task.go (+4 -4)
77. vendor/github.com/moby/swarmkit/v2/agent/worker.go (+4 -4)
78. vendor/github.com/moby/swarmkit/v2/api/README.md (+0 -0)
79. vendor/github.com/moby/swarmkit/v2/api/api.pb.txt (+0 -0)
80. vendor/github.com/moby/swarmkit/v2/api/ca.pb.go (+6 -6)
81. vendor/github.com/moby/swarmkit/v2/api/ca.proto (+0 -0)
82. vendor/github.com/moby/swarmkit/v2/api/control.pb.go (+70 -70)
83. vendor/github.com/moby/swarmkit/v2/api/control.proto (+0 -0)
84. vendor/github.com/moby/swarmkit/v2/api/deepcopy/copy.go (+0 -0)
85. vendor/github.com/moby/swarmkit/v2/api/defaults/service.go (+2 -2)
86. vendor/github.com/moby/swarmkit/v2/api/dispatcher.pb.go (+18 -18)
87. vendor/github.com/moby/swarmkit/v2/api/dispatcher.proto (+0 -0)
88. vendor/github.com/moby/swarmkit/v2/api/equality/equality.go (+1 -1)
89. vendor/github.com/moby/swarmkit/v2/api/genericresource/helpers.go (+1 -1)
90. vendor/github.com/moby/swarmkit/v2/api/genericresource/parse.go (+1 -1)
91. vendor/github.com/moby/swarmkit/v2/api/genericresource/resource_management.go (+1 -1)
92. vendor/github.com/moby/swarmkit/v2/api/genericresource/string.go (+1 -1)
93. vendor/github.com/moby/swarmkit/v2/api/genericresource/validate.go (+1 -1)
94. vendor/github.com/moby/swarmkit/v2/api/health.pb.go (+2 -2)
95. vendor/github.com/moby/swarmkit/v2/api/health.proto (+0 -0)
96. vendor/github.com/moby/swarmkit/v2/api/logbroker.pb.go (+13 -13)
97. vendor/github.com/moby/swarmkit/v2/api/logbroker.proto (+0 -0)
98. vendor/github.com/moby/swarmkit/v2/api/naming/naming.go (+1 -1)
99. vendor/github.com/moby/swarmkit/v2/api/objects.pb.go (+60 -60)
100. vendor/github.com/moby/swarmkit/v2/api/objects.proto (+0 -0)
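Nearly every Go file listed above changes in the same mechanical way: swarmkit's module moved from github.com/docker/swarmkit to github.com/moby/swarmkit and is now a v2 module, and Go's semantic import versioning puts the major version into the import path, so every import gains a /v2 element. As an illustrative sketch only (not a file from this PR), the pattern repeated in the per-file diffs below looks like this:

	package cluster

	import (
		// was: swarmapi "github.com/docker/swarmkit/api"
		swarmapi "github.com/moby/swarmkit/v2/api"
	)

	// The local alias stays the same, so references such as swarmapi.Node
	// compile unchanged; only the import path moves.
	var _ *swarmapi.Node

For the daemon and integration-cli files shown here, only the import lines are touched.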

+ 1 - 1
cmd/dockerd/daemon.go

@@ -52,8 +52,8 @@ import (
 	"github.com/docker/docker/rootless"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/tlsconfig"
-	swarmapi "github.com/docker/swarmkit/api"
 	"github.com/moby/buildkit/session"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/pflag"

+ 1 - 1
container/container.go

@@ -36,7 +36,7 @@ import (
 	"github.com/docker/docker/volume"
 	volumemounts "github.com/docker/docker/volume/mounts"
 	units "github.com/docker/go-units"
-	agentexec "github.com/docker/swarmkit/agent/exec"
+	agentexec "github.com/moby/swarmkit/v2/agent/exec"
 	"github.com/moby/sys/signal"
 	"github.com/moby/sys/symlink"
 	"github.com/pkg/errors"

+ 2 - 2
daemon/cluster/cluster.go

@@ -55,8 +55,8 @@ import (
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	lncluster "github.com/docker/docker/libnetwork/cluster"
 	"github.com/docker/docker/pkg/stack"
-	swarmapi "github.com/docker/swarmkit/api"
-	swarmnode "github.com/docker/swarmkit/node"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"

+ 1 - 1
daemon/cluster/configs.go

@@ -6,7 +6,7 @@ import (
 	apitypes "github.com/docker/docker/api/types"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"google.golang.org/grpc"
 )
 

+ 1 - 1
daemon/cluster/controllers/plugin/controller.go

@@ -11,8 +11,8 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/plugin"
 	v2 "github.com/docker/docker/plugin/v2"
-	"github.com/docker/swarmkit/api"
 	"github.com/gogo/protobuf/proto"
+	"github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

+ 1 - 1
daemon/cluster/convert/config.go

@@ -3,8 +3,8 @@ package convert // import "github.com/docker/docker/daemon/cluster/convert"
 import (
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	types "github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 // ConfigFromGRPC converts a grpc Config to a Config.

+ 1 - 1
daemon/cluster/convert/container.go

@@ -8,8 +8,8 @@ import (
 	mounttypes "github.com/docker/docker/api/types/mount"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/go-units"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

+ 1 - 1
daemon/cluster/convert/network.go

@@ -7,8 +7,8 @@ import (
 	networktypes "github.com/docker/docker/api/types/network"
 	types "github.com/docker/docker/api/types/swarm"
 	netconst "github.com/docker/docker/libnetwork/datastore"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {

+ 1 - 1
daemon/cluster/convert/network_test.go

@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"
 
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 func TestNetworkConvertBasicNetworkFromGRPCCreatedAt(t *testing.T) {

+ 1 - 1
daemon/cluster/convert/node.go

@@ -5,8 +5,8 @@ import (
 	"strings"
 
 	types "github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 // NodeFromGRPC converts a grpc Node to a Node.

+ 1 - 1
daemon/cluster/convert/secret.go

@@ -3,8 +3,8 @@ package convert // import "github.com/docker/docker/daemon/cluster/convert"
 import (
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	types "github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 // SecretFromGRPC converts a grpc Secret to a Secret.

+ 2 - 2
daemon/cluster/convert/service.go

@@ -7,10 +7,10 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/swarm/runtime"
 	"github.com/docker/docker/pkg/namesgenerator"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/genericresource"
 	"github.com/gogo/protobuf/proto"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/genericresource"
 	"github.com/pkg/errors"
 )
 

+ 1 - 1
daemon/cluster/convert/service_test.go

@@ -6,8 +6,8 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/swarm/runtime"
-	swarmapi "github.com/docker/swarmkit/api"
 	google_protobuf3 "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"gotest.tools/v3/assert"
 )
 

+ 2 - 2
daemon/cluster/convert/swarm.go

@@ -5,9 +5,9 @@ import (
 	"strings"
 
 	types "github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/ca"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/ca"
 )
 
 // SwarmFromGRPC converts a grpc Cluster to a Swarm.

+ 1 - 1
daemon/cluster/convert/task.go

@@ -4,8 +4,8 @@ import (
 	"strings"
 
 	types "github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 // TaskFromGRPC converts a grpc Task to a Task.

+ 1 - 1
daemon/cluster/executor/backend.go

@@ -23,7 +23,7 @@ import (
 	networktypes "github.com/docker/docker/libnetwork/types"
 	"github.com/docker/docker/plugin"
 	volumeopts "github.com/docker/docker/volume/service/opts"
-	"github.com/docker/swarmkit/agent/exec"
+	"github.com/moby/swarmkit/v2/agent/exec"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 

+ 3 - 3
daemon/cluster/executor/container/adapter.go

@@ -22,10 +22,10 @@ import (
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	"github.com/docker/docker/libnetwork"
 	volumeopts "github.com/docker/docker/volume/service/opts"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
 	gogotypes "github.com/gogo/protobuf/types"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"

+ 1 - 1
daemon/cluster/executor/container/adapter_test.go

@@ -7,7 +7,7 @@ import (
 	"time"
 
 	"github.com/docker/docker/daemon"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // TestWaitNodeAttachment tests that the waitNodeAttachment method successfully

+ 2 - 2
daemon/cluster/executor/container/attachment.go

@@ -4,8 +4,8 @@ import (
 	"context"
 
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // networkAttacherController implements agent.Controller against docker's API.

+ 4 - 4
daemon/cluster/executor/container/container.go

@@ -23,11 +23,11 @@ import (
 	netconst "github.com/docker/docker/libnetwork/datastore"
 	"github.com/docker/go-connections/nat"
 	"github.com/docker/go-units"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/genericresource"
-	"github.com/docker/swarmkit/template"
 	gogotypes "github.com/gogo/protobuf/types"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/genericresource"
+	"github.com/moby/swarmkit/v2/template"
 )
 
 const (

+ 1 - 1
daemon/cluster/executor/container/container_test.go

@@ -4,7 +4,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types/container"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"gotest.tools/v3/assert"
 )
 

+ 3 - 3
daemon/cluster/executor/container/controller.go

@@ -13,10 +13,10 @@ import (
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	"github.com/docker/docker/libnetwork"
 	"github.com/docker/go-connections/nat"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
 	gogotypes "github.com/gogo/protobuf/types"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
 	"github.com/pkg/errors"
 	"golang.org/x/time/rate"
 )

+ 6 - 6
daemon/cluster/executor/container/executor.go

@@ -17,12 +17,12 @@ import (
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
 	"github.com/docker/docker/libnetwork"
 	networktypes "github.com/docker/docker/libnetwork/types"
-	"github.com/docker/swarmkit/agent"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/naming"
-	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/template"
+	"github.com/moby/swarmkit/v2/agent"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/naming"
+	"github.com/moby/swarmkit/v2/log"
+	"github.com/moby/swarmkit/v2/template"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

+ 1 - 1
daemon/cluster/executor/container/health_test.go

@@ -12,7 +12,7 @@ import (
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon/events"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 func TestHealthStates(t *testing.T) {

+ 1 - 1
daemon/cluster/executor/container/validate.go

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"path/filepath"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 func validateMounts(mounts []api.Mount) error {

+ 1 - 1
daemon/cluster/executor/container/validate_test.go

@@ -7,7 +7,7 @@ import (
 
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 func newTestControllerWithMount(m api.Mount) (*controller, error) {

+ 1 - 1
daemon/cluster/executor/container/validate_windows_test.go

@@ -6,7 +6,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 const (

+ 1 - 1
daemon/cluster/filters.go

@@ -6,7 +6,7 @@ import (
 
 	"github.com/docker/docker/api/types/filters"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 )
 
 func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {

+ 1 - 1
daemon/cluster/helpers.go

@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/docker/docker/errdefs"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 )
 

+ 1 - 1
daemon/cluster/networks.go

@@ -12,7 +12,7 @@ import (
 	internalnetwork "github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/runconfig"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

+ 3 - 3
daemon/cluster/noderunner.go

@@ -11,9 +11,9 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/executor/container"
 	lncluster "github.com/docker/docker/libnetwork/cluster"
-	swarmapi "github.com/docker/swarmkit/api"
-	swarmallocator "github.com/docker/swarmkit/manager/allocator/cnmallocator"
-	swarmnode "github.com/docker/swarmkit/node"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
+	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"

+ 1 - 1
daemon/cluster/nodes.go

@@ -7,7 +7,7 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
 	"github.com/docker/docker/errdefs"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"google.golang.org/grpc"
 )
 

+ 1 - 1
daemon/cluster/secrets.go

@@ -6,7 +6,7 @@ import (
 	apitypes "github.com/docker/docker/api/types"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"google.golang.org/grpc"
 )
 

+ 1 - 1
daemon/cluster/services.go

@@ -19,8 +19,8 @@ import (
 	"github.com/docker/docker/daemon/cluster/convert"
 	"github.com/docker/docker/errdefs"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"

+ 3 - 3
daemon/cluster/swarm.go

@@ -14,9 +14,9 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/stack"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/manager/encryption"
-	swarmnode "github.com/docker/swarmkit/node"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/manager/encryption"
+	swarmnode "github.com/moby/swarmkit/v2/node"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"

+ 1 - 1
daemon/cluster/tasks.go

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"google.golang.org/grpc"
 )
 

+ 1 - 1
daemon/config/opts.go

@@ -3,7 +3,7 @@ package config // import "github.com/docker/docker/daemon/config"
 import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
-	"github.com/docker/swarmkit/api/genericresource"
+	"github.com/moby/swarmkit/v2/api/genericresource"
 )
 
 // ParseGenericResources parses and validates the specified string as a list of GenericResource

+ 1 - 1
daemon/dependency.go

@@ -1,7 +1,7 @@
 package daemon // import "github.com/docker/docker/daemon"
 
 import (
-	"github.com/docker/swarmkit/agent/exec"
+	"github.com/moby/swarmkit/v2/agent/exec"
 )
 
 // SetContainerDependencyStore sets the dependency store backend for the container

+ 1 - 1
daemon/events.go

@@ -11,8 +11,8 @@ import (
 	"github.com/docker/docker/container"
 	daemonevents "github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/libnetwork"
-	swarmapi "github.com/docker/swarmkit/api"
 	gogotypes "github.com/gogo/protobuf/types"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	"github.com/sirupsen/logrus"
 )
 

+ 2 - 2
daemon/oci_windows_test.go

@@ -11,8 +11,8 @@ import (
 
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
-	swarmagent "github.com/docker/swarmkit/agent"
-	swarmapi "github.com/docker/swarmkit/api"
+	swarmagent "github.com/moby/swarmkit/v2/agent"
+	swarmapi "github.com/moby/swarmkit/v2/api"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"golang.org/x/sys/windows/registry"
 	"gotest.tools/v3/assert"

+ 1 - 1
integration-cli/docker_api_swarm_test.go

@@ -27,7 +27,7 @@ import (
 	"github.com/docker/docker/integration-cli/daemon"
 	testdaemon "github.com/docker/docker/testutil/daemon"
 	"github.com/docker/docker/testutil/request"
-	"github.com/docker/swarmkit/ca"
+	"github.com/moby/swarmkit/v2/ca"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/poll"

+ 1 - 1
integration-cli/docker_cli_swarm_test.go

@@ -27,7 +27,7 @@ import (
 	"github.com/docker/docker/libnetwork/driverapi"
 	"github.com/docker/docker/libnetwork/ipamapi"
 	remoteipam "github.com/docker/docker/libnetwork/ipams/remote/api"
-	"github.com/docker/swarmkit/ca/keyutils"
+	"github.com/moby/swarmkit/v2/ca/keyutils"
 	"github.com/vishvananda/netlink"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/fs"

+ 13 - 8
vendor.mod

@@ -7,7 +7,7 @@ module github.com/docker/docker
 go 1.17
 
 require (
-	cloud.google.com/go v0.92.0
+	cloud.google.com/go v0.93.3
 	cloud.google.com/go/logging v1.4.2
 	github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
 	github.com/Microsoft/go-winio v0.5.1
@@ -32,7 +32,6 @@ require (
 	github.com/docker/go-units v0.4.0
 	github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
-	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fluent/fluent-logger-golang v1.9.0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/godbus/dbus/v5 v5.0.6
@@ -42,17 +41,18 @@ require (
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/mux v1.8.0
 	github.com/hashicorp/go-immutable-radix v1.3.1
-	github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc
+	github.com/hashicorp/go-memdb v1.3.2
 	github.com/hashicorp/memberlist v0.2.4
 	github.com/hashicorp/serf v0.8.2
 	github.com/imdario/mergo v0.3.12
-	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee
+	github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
 	github.com/klauspost/compress v1.15.1
 	github.com/miekg/dns v1.1.27
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/buildkit v0.10.1-0.20220327110152-d7744bcb3532
 	github.com/moby/ipvs v1.0.1
 	github.com/moby/locker v1.0.1
+	github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46
 	github.com/moby/sys/mount v0.3.1
 	github.com/moby/sys/mountinfo v0.6.0
 	github.com/moby/sys/signal v0.7.0
@@ -60,7 +60,7 @@ require (
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
+	github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
 	github.com/opencontainers/runc v1.1.1
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/selinux v1.10.0
@@ -91,7 +91,6 @@ require (
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
-	github.com/akutz/memconn v0.1.0 // indirect
 	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
@@ -114,7 +113,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
-	github.com/google/certificate-transparency-go v1.0.20 // indirect
+	github.com/google/certificate-transparency-go v1.1.2 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
@@ -124,7 +123,7 @@ require (
 	github.com/hashicorp/go-msgpack v0.5.3 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
-	github.com/hashicorp/golang-lru v0.5.3 // indirect
+	github.com/hashicorp/golang-lru v0.5.4 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/jmespath/go-jmespath v0.3.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@@ -184,3 +183,9 @@ replace (
 
 // Removes etcd dependency
 replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre
+
+// Resolve dependency hell with github.com/cloudflare/cfssl (transitive via
+// swarmkit) by pinning the certificate-transparency-go version. Remove once
+// module go.etcd.io/etcd/server/v3 has upgraded its dependency on
+// go.opentelemetry.io/otel to v1.
+replace github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.20
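The replace directive added above is the one part of vendor.mod that is not a plain version bump: in go.mod terms, a replace rewrites how a module is resolved for this build regardless of what any transitive go.mod requires, so certificate-transparency-go is held at v1.0.20 even though the require block now lists v1.1.2 as an indirect dependency. A minimal sketch of the semantics, using a hypothetical module name rather than anything from this PR:

	module example.com/app

	require example.com/lib v1.4.0

	// Builds of example.com/app resolve example.com/lib to the contents of
	// v1.2.3, even if some transitive dependency requires a newer version.
	replace example.com/lib => example.com/lib v1.2.3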

+ 15 - 12
vendor.sum

@@ -24,8 +24,8 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV
 cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
 cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
 cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.92.0 h1:gx+SLyeAiq8Et/uxOrHPuLgu3ZQ2TguaO1OE9P05QsQ=
-cloud.google.com/go v0.92.0/go.mod h1:cMc7asehN84LBi1JGTHo4n8wuaGuNAZ7lR7b1YNJBrE=
+cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -369,8 +369,6 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE
 github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 h1:k8TfKGeAcDQFFQOGCQMRN04N4a9YrPlRMMKnzAuvM9Q=
 github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0 h1:YehAv2BPLfTm58HW04wRnNy8Oo/CAzWji7mjJ6UJWgM=
-github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dperny/gocsi v1.2.3-pre h1:GRTvl8G6yEXYPyul1h6YAqtyxzUHTrQHo6G3xZpb9oM=
 github.com/dperny/gocsi v1.2.3-pre/go.mod h1:qQw5mIunz1RqMUfZcGJ9/Lt9EDaL0N3wPNYxFTuyLQo=
@@ -549,7 +547,6 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210804190019-f964ff605595/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -595,10 +592,11 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc h1:+a6OGop8lqksGF5BgpRVghkeR3vy2HDa7lDKx6UvSRE=
-github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE=
+github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V7g8=
+github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
 github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67 h1:uUGuA3Cnfp7qbFpIMOCDVz3TaWIF4lLYM8PE3YHpoA4=
 github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
@@ -613,8 +611,8 @@ github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
 github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
@@ -634,8 +632,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E=
 github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
+github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g=
+github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
 github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -732,6 +730,8 @@ github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hx
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46 h1:FVr9eatIpN7PlE2ZHP850rIJ6AQoZxoZvPSDR+WQY38=
+github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46/go.mod h1:/so6Lct4y1x14UprW/loFsOe6xoXVTlvh25V36ULXNQ=
 github.com/moby/sys/mount v0.3.1 h1:RX1K0x95oR8j5P1YefKDt7tE1C2kCCixV0H8Aza3GaI=
 github.com/moby/sys/mount v0.3.1/go.mod h1:6IZknFQiqjLpwuYJD5Zk0qYEuJiws36M88MIXnZHya0=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@@ -802,8 +802,9 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY=
 github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
+github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -867,6 +868,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rootless-containers/rootlesskit v1.0.0 h1:+DI5RQEZa4OOnkOixkrezFye0XLlSsdrtGSP6+g1254=
 github.com/rootless-containers/rootlesskit v1.0.0/go.mod h1:8Lo4zb73rSW3seB+a7UuO1gAoRD1pVkKMbXEY3NFNTE=
 github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -974,6 +976,7 @@ github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

+ 81 - 3
vendor/cloud.google.com/go/doc.go

@@ -28,8 +28,50 @@ Authentication and Authorization
 
 All the clients in sub-packages support authentication via Google Application Default
 Credentials (see https://cloud.google.com/docs/authentication/production), or
-by providing a JSON key file for a Service Account. See the authentication examples
-in this package for details.
+by providing a JSON key file for a Service Account. See examples below.
+
+Google Application Default Credentials (ADC) is the recommended way to authorize
+and authenticate clients. For information on how to create and obtain
+Application Default Credentials, see
+https://cloud.google.com/docs/authentication/production. Here is an example
+of a client using ADC to authenticate:
+ client, err := secretmanager.NewClient(context.Background())
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ _ = client // Use the client.
+
+You can use a file with credentials to authenticate and authorize, such as a JSON
+key file associated with a Google service account. Service Account keys can be
+created and downloaded from
+https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses
+the Secret Manger client, but the same steps apply to the other client libraries
+underneath this package. Example:
+ client, err := secretmanager.NewClient(context.Background(),
+ 	option.WithCredentialsFile("/path/to/service-account-key.json"))
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ _ = client // Use the client.
+
+In some cases (for instance, you don't want to store secrets on disk), you can
+create credentials from in-memory JSON and use the WithCredentials option.
+The google package in this example is at golang.org/x/oauth2/google.
+This example uses the Secret Manager client, but the same steps apply to
+the other client libraries underneath this package. Note that scopes can be
+found at https://developers.google.com/identity/protocols/oauth2/scopes, and
+are also provided in all auto-generated libraries: for example,
+cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
+ ctx := context.Background()
+ creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ _ = client // Use the client.
 
 
 
 
 Timeouts and Cancellation
 Timeouts and Cancellation
@@ -37,9 +79,45 @@ Timeouts and Cancellation
 By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
 By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
 context provided at call time, unless a context deadline is already set. Streaming
 context provided at call time, unless a context deadline is already set. Streaming
 methods have no default deadline and will run indefinitely. To set timeouts or
 methods have no default deadline and will run indefinitely. To set timeouts or
-arrange for cancellation, use contexts. See the examples for details. Transient
+arrange for cancellation, use contexts. Transient
 errors will be retried when correctness allows.
 errors will be retried when correctness allows.
 
 
+To set a timeout for an RPC, use context.WithTimeout:
+ ctx := context.Background()
+ // Do not set a timeout on the context passed to NewClient: dialing happens
+ // asynchronously, and the context is used to refresh credentials in the
+ // background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ 	// Time out if it takes more than 10 seconds to delete the secret.
+ tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel() // Always call cancel.
+
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ if err := client.DeleteSecret(tctx, req); err != nil {
+ 	// TODO: handle error.
+ }
+
+To arrange for an RPC to be canceled, use context.WithCancel:
+ ctx := context.Background()
+ // Do not cancel the context passed to NewClient: dialing happens asynchronously,
+ // and the context is used to refresh credentials in the background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ 	// TODO: handle error.
+ }
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel() // Always call cancel.
+
+ // TODO: Make the cancel function available to whatever might want to cancel the
+ // call--perhaps a GUI button.
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
+ if err := client.DeleteSecret(cctx, req); err != nil {
+ 	// TODO: handle error.
+ }
+
 To opt out of default deadlines, set the temporary environment variable
 To opt out of default deadlines, set the temporary environment variable
 GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
 GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
 creation. This affects all Google Cloud Go client libraries. This opt-out
 creation. This affects all Google Cloud Go client libraries. This opt-out
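The opt-out described here can be exercised with a short, hedged sketch; the environment variable name is taken from the text above, while the use of os.Setenv and the secretmanager client are illustrative assumptions:

```go
// Opt out of the default per-call deadlines for all Google Cloud Go
// clients created after this point (set before any client is built).
os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE", "true")

client, err := secretmanager.NewClient(context.Background())
if err != nil {
	// TODO: handle error.
}
_ = client // Use the client; its calls now carry no default deadline.
```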

+ 2 - 0
vendor/github.com/hashicorp/go-memdb/.gitignore

@@ -22,3 +22,5 @@ _testmain.go
 *.exe
 *.exe
 *.test
 *.test
 *.prof
 *.prof
+
+.idea

+ 80 - 27
vendor/github.com/hashicorp/go-memdb/README.md

@@ -1,9 +1,9 @@
-# go-memdb
+# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master)
 
 
 Provides the `memdb` package that implements a simple in-memory database
 Provides the `memdb` package that implements a simple in-memory database
 built on immutable radix trees. The database provides Atomicity, Consistency
 built on immutable radix trees. The database provides Atomicity, Consistency
 and Isolation from ACID. Being that it is in-memory, it does not provide durability.
 and Isolation from ACID. Being that it is in-memory, it does not provide durability.
-The database is instantiated with a schema that specifies the tables and indicies
+The database is instantiated with a schema that specifies the tables and indices
 that exist and allows transactions to be executed.
 that exist and allows transactions to be executed.
 
 
 The database provides the following:
 The database provides the following:
@@ -19,58 +19,75 @@ The database provides the following:
 
 
 * Rich Indexing - Tables can support any number of indexes, which can be simple like
 * Rich Indexing - Tables can support any number of indexes, which can be simple like
   a single field index, or more advanced compound field indexes. Certain types like
   a single field index, or more advanced compound field indexes. Certain types like
-  UUID can be efficiently compressed from strings into byte indexes for reduces
+  UUID can be efficiently compressed from strings into byte indexes for reduced
   storage requirements.
   storage requirements.
 
 
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+  detect when a modification has been made to the database which affects the query
+  results. This lets callers easily watch for changes in the database in a very general
+  way.
+
 For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
 For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
 
 
 Documentation
 Documentation
 =============
 =============
 
 
-The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb).
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
 
 
 Example
 Example
 =======
 =======
 
 
-Below is a simple example of usage
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage
 
 
 ```go
 ```go
 // Create a sample struct
 // Create a sample struct
 type Person struct {
 type Person struct {
-    Email string
-    Name  string
-    Age   int
+	Email string
+	Name  string
+	Age   int
 }
 }
 
 
 // Create the DB schema
 // Create the DB schema
 schema := &memdb.DBSchema{
 schema := &memdb.DBSchema{
-    Tables: map[string]*memdb.TableSchema{
-        "person": &memdb.TableSchema{
-            Name: "person",
-            Indexes: map[string]*memdb.IndexSchema{
-                "id": &memdb.IndexSchema{
-                    Name:    "id",
-                    Unique:  true,
-                    Indexer: &memdb.StringFieldIndex{Field: "Email"},
-                },
-            },
-        },
-    },
+	Tables: map[string]*memdb.TableSchema{
+		"person": &memdb.TableSchema{
+			Name: "person",
+			Indexes: map[string]*memdb.IndexSchema{
+				"id": &memdb.IndexSchema{
+					Name:    "id",
+					Unique:  true,
+					Indexer: &memdb.StringFieldIndex{Field: "Email"},
+				},
+				"age": &memdb.IndexSchema{
+					Name:    "age",
+					Unique:  false,
+					Indexer: &memdb.IntFieldIndex{Field: "Age"},
+				},
+			},
+		},
+	},
 }
 }
 
 
 // Create a new data base
 // Create a new data base
 db, err := memdb.NewMemDB(schema)
 db, err := memdb.NewMemDB(schema)
 if err != nil {
 if err != nil {
-    panic(err)
+	panic(err)
 }
 }
 
 
 // Create a write transaction
 // Create a write transaction
 txn := db.Txn(true)
 txn := db.Txn(true)
 
 
-// Insert a new person
-p := &Person{"joe@aol.com", "Joe", 30}
-if err := txn.Insert("person", p); err != nil {
-    panic(err)
+// Insert some people
+people := []*Person{
+	&Person{"joe@aol.com", "Joe", 30},
+	&Person{"lucy@aol.com", "Lucy", 35},
+	&Person{"tariq@aol.com", "Tariq", 21},
+	&Person{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+	if err := txn.Insert("person", p); err != nil {
+		panic(err)
+	}
 }
 }
 
 
 // Commit the transaction
 // Commit the transaction
@@ -83,11 +100,47 @@ defer txn.Abort()
 // Lookup by email
 // Lookup by email
 raw, err := txn.First("person", "id", "joe@aol.com")
 raw, err := txn.First("person", "id", "joe@aol.com")
 if err != nil {
 if err != nil {
-    panic(err)
+	panic(err)
 }
 }
 
 
 // Say hi!
 // Say hi!
-fmt.Printf("Hello %s!", raw.(*Person).Name)
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+	panic(err)
+}
 
 
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+	p := obj.(*Person)
+	fmt.Printf("  %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+	panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+	p := obj.(*Person)
+	if p.Age > 35 {
+		break
+	}
+	fmt.Printf("  %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+//   Dorothy
+//   Joe
+//   Lucy
+//   Tariq
+// People aged 25 - 35:
+//   Joe is aged 30
+//   Lucy is aged 35
 ```
 ```
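The Watches bullet above has no snippet in the README; here is a minimal sketch of watching the query from the example, assuming the WatchSet helpers (NewWatchSet, Add, WatchCtx) provided by the newly vendored watch.go:

```go
// Open a read transaction and register interest in the "person" query.
wtxn := db.Txn(false)
defer wtxn.Abort()

it, err := wtxn.Get("person", "id")
if err != nil {
	panic(err)
}

ws := memdb.NewWatchSet()
ws.Add(it.WatchCh())

// Block until a write affecting this query commits, or the context ends.
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
if err := ws.WatchCtx(ctx); err == nil {
	fmt.Println("person table changed; re-run the query")
}
```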
 
 

+ 34 - 0
vendor/github.com/hashicorp/go-memdb/changes.go

@@ -0,0 +1,34 @@
+package memdb
+
+// Changes describes a set of mutations to memDB tables performed during a
+// transaction.
+type Changes []Change
+
+// Change describes a mutation to an object in a table.
+type Change struct {
+	Table  string
+	Before interface{}
+	After  interface{}
+
+	// primaryKey stores the raw key value from the primary index so that we can
+	// de-duplicate multiple updates of the same object in the same transaction
+	// but we don't expose this implementation detail to the consumer.
+	primaryKey []byte
+}
+
+// Created returns true if the mutation describes a new object being inserted.
+func (m *Change) Created() bool {
+	return m.Before == nil && m.After != nil
+}
+
+// Updated returns true if the mutation describes an existing object being
+// updated.
+func (m *Change) Updated() bool {
+	return m.Before != nil && m.After != nil
+}
+
+// Deleted returns true if the mutation describes an existing object being
+// deleted.
+func (m *Change) Deleted() bool {
+	return m.Before != nil && m.After == nil
+}
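These predicates pair with the change-tracking hooks added to Txn later in this diff (TrackChanges and the Changes accessor); a sketch reusing the Person table from the README example, with the accessor name assumed from the txn.go comments:

```go
txn := db.Txn(true)
txn.TrackChanges() // opt in to change tracking for this write transaction

if err := txn.Insert("person", &Person{"joe@aol.com", "Joe", 31}); err != nil {
	panic(err)
}

// Inspect the recorded mutations before committing.
for _, ch := range txn.Changes() {
	switch {
	case ch.Created():
		fmt.Printf("created in %s: %+v\n", ch.Table, ch.After)
	case ch.Updated():
		fmt.Printf("updated in %s: %+v -> %+v\n", ch.Table, ch.Before, ch.After)
	case ch.Deleted():
		fmt.Printf("deleted from %s: %+v\n", ch.Table, ch.Before)
	}
}
txn.Commit()
```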

+ 38 - 0
vendor/github.com/hashicorp/go-memdb/filter.go

@@ -0,0 +1,38 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+	// filter is the filter function applied over the base iterator.
+	filter FilterFunc
+
+	// iter is the iterator that is being wrapped.
+	iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+	return &FilterIterator{
+		filter: filter,
+		iter:   iter,
+	}
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+	for {
+		if value := f.iter.Next(); value == nil || !f.filter(value) {
+			return value
+		}
+	}
+}
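A small sketch of wrapping a query with the new FilterIterator, reusing the Person table from the README example; note that the FilterFunc returns true for results that should be dropped:

```go
txn := db.Txn(false)
defer txn.Abort()

it, err := txn.Get("person", "id")
if err != nil {
	panic(err)
}

// Keep only people aged 30 or older; the filter returns true to filter OUT.
adults := memdb.NewFilterIterator(it, func(raw interface{}) bool {
	return raw.(*Person).Age < 30
})

for obj := adults.Next(); obj != nil; obj = adults.Next() {
	fmt.Println(obj.(*Person).Name)
}
```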

+ 501 - 21
vendor/github.com/hashicorp/go-memdb/index.go

@@ -1,41 +1,55 @@
 package memdb
 package memdb
 
 
 import (
 import (
+	"encoding/binary"
 	"encoding/hex"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"fmt"
+	"math/bits"
 	"reflect"
 	"reflect"
 	"strings"
 	"strings"
 )
 )
 
 
-// Indexer is an interface used for defining indexes
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
 type Indexer interface {
 type Indexer interface {
-	// ExactFromArgs is used to build an exact index lookup
-	// based on arguments
+	// FromArgs is called to build the exact index key from a list of arguments.
 	FromArgs(args ...interface{}) ([]byte, error)
 	FromArgs(args ...interface{}) ([]byte, error)
 }
 }
 
 
-// SingleIndexer is an interface used for defining indexes
-// generating a single entry per object
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
 type SingleIndexer interface {
 type SingleIndexer interface {
-	// FromObject is used to extract an index value from an
-	// object or to indicate that the index value is missing.
+	// FromObject extracts the index value from an object. The return values
+	// are whether the index value was found, the index value, and any error
+	// while extracting the index value, respectively.
 	FromObject(raw interface{}) (bool, []byte, error)
 	FromObject(raw interface{}) (bool, []byte, error)
 }
 }
 
 
-// MultiIndexer is an interface used for defining indexes
-// generating multiple entries per object
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
 type MultiIndexer interface {
 type MultiIndexer interface {
-	// FromObject is used to extract index values from an
-	// object or to indicate that the index value is missing.
+	// FromObject extracts index values from an object. The return values
+	// are the same as a SingleIndexer except there can be multiple index
+	// values.
 	FromObject(raw interface{}) (bool, [][]byte, error)
 	FromObject(raw interface{}) (bool, [][]byte, error)
 }
 }
 
 
-// PrefixIndexer can optionally be implemented for any
-// indexes that support prefix based iteration. This may
-// not apply to all indexes.
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
 type PrefixIndexer interface {
 type PrefixIndexer interface {
-	// PrefixFromArgs returns a prefix that should be used
-	// for scanning based on the arguments
+	// PrefixFromArgs is the same as FromArgs for an Indexer except that
+	// the index value returned should return all prefix-matched values.
 	PrefixFromArgs(args ...interface{}) ([]byte, error)
 	PrefixFromArgs(args ...interface{}) ([]byte, error)
 }
 }
 
 
@@ -51,9 +65,16 @@ func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
 	v = reflect.Indirect(v) // Dereference the pointer if any
 	v = reflect.Indirect(v) // Dereference the pointer if any
 
 
 	fv := v.FieldByName(s.Field)
 	fv := v.FieldByName(s.Field)
-	if !fv.IsValid() {
+	isPtr := fv.Kind() == reflect.Ptr
+	fv = reflect.Indirect(fv)
+	if !isPtr && !fv.IsValid() {
 		return false, nil,
 		return false, nil,
-			fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+			fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr)
+	}
+
+	if isPtr && !fv.IsValid() {
+		val := ""
+		return false, []byte(val), nil
 	}
 	}
 
 
 	val := fv.String()
 	val := fv.String()
@@ -100,8 +121,9 @@ func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
 	return val, nil
 	return val, nil
 }
 }
 
 
-// StringSliceFieldIndex is used to extract a field from an object
-// using reflection and builds an index on that field.
+// StringSliceFieldIndex builds an index from a field on an object that is a
+// string slice ([]string). Each value within the string slice can be used for
+// lookup.
 type StringSliceFieldIndex struct {
 type StringSliceFieldIndex struct {
 	Field     string
 	Field     string
 	Lowercase bool
 	Lowercase bool
@@ -173,6 +195,290 @@ func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, err
 	return val, nil
 	return val, nil
 }
 }
 
 
+// StringMapFieldIndex is used to extract a field of type map[string]string
+// from an object using reflection and builds an index on that field.
+//
+// Note that although FromArgs in theory supports using either one or
+// two arguments, there is a bug: FromObject only creates an index
+// using key/value, and does not also create an index using key. This
+// means a lookup using one argument will never actually work.
+//
+// It is currently left as-is to prevent backwards compatibility
+// issues.
+//
+// TODO: Fix this in the next major bump.
+type StringMapFieldIndex struct {
+	Field     string
+	Lowercase bool
+}
+
+var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind()
+
+func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(s.Field)
+	if !fv.IsValid() {
+		return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+	}
+
+	if fv.Kind() != MapType {
+		return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field)
+	}
+
+	length := fv.Len()
+	vals := make([][]byte, 0, length)
+	for _, key := range fv.MapKeys() {
+		k := key.String()
+		if k == "" {
+			continue
+		}
+		val := fv.MapIndex(key).String()
+
+		if s.Lowercase {
+			k = strings.ToLower(k)
+			val = strings.ToLower(val)
+		}
+
+		// Add the null character as a terminator
+		k += "\x00" + val + "\x00"
+
+		vals = append(vals, []byte(k))
+	}
+	if len(vals) == 0 {
+		return false, nil, nil
+	}
+	return true, vals, nil
+}
+
+// WARNING: Because of a bug in FromObject, this function will never return
+// a value when using the single-argument version.
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) > 2 || len(args) == 0 {
+		return nil, fmt.Errorf("must provide one or two arguments")
+	}
+	key, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	if s.Lowercase {
+		key = strings.ToLower(key)
+	}
+	// Add the null character as a terminator
+	key += "\x00"
+
+	if len(args) == 2 {
+		val, ok := args[1].(string)
+		if !ok {
+			return nil, fmt.Errorf("argument must be a string: %#v", args[1])
+		}
+		if s.Lowercase {
+			val = strings.ToLower(val)
+		}
+		// Add the null character as a terminator
+		key += val + "\x00"
+	}
+
+	return []byte(key), nil
+}
+
+// IntFieldIndex is used to extract an int field from an object using
+// reflection and builds an index on that field.
+type IntFieldIndex struct {
+	Field string
+}
+
+func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	size, ok := IsIntType(k)
+	if !ok {
+		return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k)
+	}
+
+	// Get the value and encode it
+	val := fv.Int()
+	buf := make([]byte, size)
+	binary.PutVarint(buf, val)
+
+	return true, buf, nil
+}
+
+func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	v := reflect.ValueOf(args[0])
+	if !v.IsValid() {
+		return nil, fmt.Errorf("%#v is invalid", args[0])
+	}
+
+	k := v.Kind()
+	size, ok := IsIntType(k)
+	if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want an int", k)
+	}
+
+	val := v.Int()
+	buf := make([]byte, size)
+	binary.PutVarint(buf, val)
+
+	return buf, nil
+}
+
+// IsIntType returns whether the passed type is a type of int and the number
+// of bytes needed to encode the type.
+func IsIntType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Int:
+		return binary.MaxVarintLen64, true
+	case reflect.Int8:
+		return 2, true
+	case reflect.Int16:
+		return binary.MaxVarintLen16, true
+	case reflect.Int32:
+		return binary.MaxVarintLen32, true
+	case reflect.Int64:
+		return binary.MaxVarintLen64, true
+	default:
+		return 0, false
+	}
+}
+
+// UintFieldIndex is used to extract a uint field from an object using
+// reflection and builds an index on that field.
+type UintFieldIndex struct {
+	Field string
+}
+
+func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(u.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k)
+	}
+
+	// Get the value and encode it
+	val := fv.Uint()
+	buf := encodeUInt(val, size)
+
+	return true, buf, nil
+}
+
+func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	v := reflect.ValueOf(args[0])
+	if !v.IsValid() {
+		return nil, fmt.Errorf("%#v is invalid", args[0])
+	}
+
+	k := v.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want a uint", k)
+	}
+
+	val := v.Uint()
+	buf := encodeUInt(val, size)
+
+	return buf, nil
+}
+
+func encodeUInt(val uint64, size int) []byte {
+	buf := make([]byte, size)
+
+	switch size {
+	case 1:
+		buf[0] = uint8(val)
+	case 2:
+		binary.BigEndian.PutUint16(buf, uint16(val))
+	case 4:
+		binary.BigEndian.PutUint32(buf, uint32(val))
+	case 8:
+		binary.BigEndian.PutUint64(buf, val)
+	}
+
+	return buf
+}
+
+// IsUintType returns whether the passed type is a type of uint and the number
+// of bytes needed to encode the type.
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Uint:
+		return bits.UintSize / 8, true
+	case reflect.Uint8:
+		return 1, true
+	case reflect.Uint16:
+		return 2, true
+	case reflect.Uint32:
+		return 4, true
+	case reflect.Uint64:
+		return 8, true
+	default:
+		return 0, false
+	}
+}
+
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+	Field string
+}
+
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	if k != reflect.Bool {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+	}
+
+	// Get the value and encode it
+	buf := make([]byte, 1)
+	if fv.Bool() {
+		buf[0] = 1
+	}
+
+	return true, buf, nil
+}
+
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
 // UUIDFieldIndex is used to extract a field from an object
 // UUIDFieldIndex is used to extract a field from an object
 // using reflection and builds an index on that field by treating
 // using reflection and builds an index on that field by treating
 // it as a UUID. This is an optimization to using a StringFieldIndex
 // it as a UUID. This is an optimization to using a StringFieldIndex
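The new IntFieldIndex, UintFieldIndex and BoolFieldIndex plug into a schema like any other Indexer; a brief sketch with a hypothetical Account struct (ID string, Balance int, Active bool):

```go
schema := &memdb.DBSchema{
	Tables: map[string]*memdb.TableSchema{
		"account": {
			Name: "account",
			Indexes: map[string]*memdb.IndexSchema{
				"id": {
					Name:    "id",
					Unique:  true,
					Indexer: &memdb.StringFieldIndex{Field: "ID"},
				},
				"balance": {
					Name:    "balance",
					Indexer: &memdb.IntFieldIndex{Field: "Balance"},
				},
				"active": {
					Name:    "active",
					Indexer: &memdb.BoolFieldIndex{Field: "Active"},
				},
			},
		},
	},
}
_ = schema

// Later, a lookup over the boolean index:
//   it, err := txn.Get("account", "active", true)
```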
@@ -378,7 +684,7 @@ func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
 
 
 func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
 func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
 	if len(args) != len(c.Indexes) {
 	if len(args) != len(c.Indexes) {
-		return nil, fmt.Errorf("less arguments than index fields")
+		return nil, fmt.Errorf("non-equivalent argument count and index fields")
 	}
 	}
 	var out []byte
 	var out []byte
 	for i, arg := range args {
 	for i, arg := range args {
@@ -417,3 +723,177 @@ func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
 	}
 	}
 	return out, nil
 	return out, nil
 }
 }
+
+// CompoundMultiIndex is used to build an index using multiple
+// sub-indexes.
+//
+// Unlike CompoundIndex, CompoundMultiIndex can have both
+// SingleIndexer and MultiIndexer sub-indexers. However, each
+// MultiIndexer adds considerable overhead/complexity in terms of
+// the number of indexes created under-the-hood. It is not suggested
+// to use more than one or two, if possible.
+//
+// Another change from CompoundIndexer is that if AllowMissing is
+// set, not only is it valid to have empty index fields, but it will
+// still create index values up to the first empty index. This means
+// that if you have a value with an empty field, rather than using a
+// prefix for lookup, you can simply pass in less arguments. As an
+// example, if {Foo, Bar} is indexed but Bar is missing for a value
+// and AllowMissing is set, an index will still be created for {Foo}
+// and it is valid to do a lookup passing in only Foo as an argument.
+// Note that the ordering isn't guaranteed -- it's last-insert wins,
+// but this is true if you have two objects that have the same
+// indexes not using AllowMissing anyways.
+//
+// Because StringMapFieldIndexers can take a varying number of args,
+// it is currently a requirement that whenever it is used, two
+// arguments must _always_ be provided for it. In theory we only
+// need one, except a bug in that indexer means the single-argument
+// version will never work. You can leave the second argument nil,
+// but it will never produce a value. We support this for whenever
+// that bug is fixed, likely in a next major version bump.
+//
+// Prefix-based indexing is not currently supported.
+type CompoundMultiIndex struct {
+	Indexes []Indexer
+
+	// AllowMissing results in an index based on only the indexers
+	// that return data. If true, you may end up with 2/3 columns
+	// indexed which might be useful for an index scan. Otherwise,
+	// CompoundMultiIndex requires all indexers to be satisfied.
+	AllowMissing bool
+}
+
+func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) {
+	// At each entry, builder is storing the results from the next index
+	builder := make([][][]byte, 0, len(c.Indexes))
+	// Start with something higher to avoid resizing if possible
+	out := make([][]byte, 0, len(c.Indexes)^3)
+
+forloop:
+	// This loop goes through each indexer and adds the value(s) provided to the next
+	// entry in the slice. We can then later walk it like a tree to construct the indices.
+	for i, idxRaw := range c.Indexes {
+		switch idx := idxRaw.(type) {
+		case SingleIndexer:
+			ok, val, err := idx.FromObject(raw)
+			if err != nil {
+				return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err)
+			}
+			if !ok {
+				if c.AllowMissing {
+					break forloop
+				} else {
+					return false, nil, nil
+				}
+			}
+			builder = append(builder, [][]byte{val})
+
+		case MultiIndexer:
+			ok, vals, err := idx.FromObject(raw)
+			if err != nil {
+				return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err)
+			}
+			if !ok {
+				if c.AllowMissing {
+					break forloop
+				} else {
+					return false, nil, nil
+				}
+			}
+
+			// Add each of the new values to each of the old values
+			builder = append(builder, vals)
+
+		default:
+			return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i)
+		}
+	}
+
+	// We are walking through the builder slice essentially in a depth-first fashion,
+	// building the prefix and leaves as we go. If AllowMissing is false, we only insert
+	// these full paths to leaves. Otherwise, we also insert each prefix along the way.
+	// This allows for lookup in FromArgs when AllowMissing is true that does not contain
+	// the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo
+	// field specified as "abc", it is valid to call FromArgs with just "abc".
+	var walkVals func([]byte, int)
+	walkVals = func(currPrefix []byte, depth int) {
+		if depth == len(builder)-1 {
+			// These are the "leaves", so append directly
+			for _, v := range builder[depth] {
+				out = append(out, append(currPrefix, v...))
+			}
+			return
+		}
+		for _, v := range builder[depth] {
+			nextPrefix := append(currPrefix, v...)
+			if c.AllowMissing {
+				out = append(out, nextPrefix)
+			}
+			walkVals(nextPrefix, depth+1)
+		}
+	}
+
+	walkVals(nil, 0)
+
+	return true, out, nil
+}
+
+func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	var stringMapCount int
+	var argCount int
+	for _, index := range c.Indexes {
+		if argCount >= len(args) {
+			break
+		}
+		if _, ok := index.(*StringMapFieldIndex); ok {
+			// We require pairs for StringMapFieldIndex, but only got one
+			if argCount+1 >= len(args) {
+				return nil, errors.New("invalid number of arguments")
+			}
+			stringMapCount++
+			argCount += 2
+		} else {
+			argCount++
+		}
+	}
+	argCount = 0
+
+	switch c.AllowMissing {
+	case true:
+		if len(args) > len(c.Indexes)+stringMapCount {
+			return nil, errors.New("too many arguments")
+		}
+
+	default:
+		if len(args) != len(c.Indexes)+stringMapCount {
+			return nil, errors.New("number of arguments does not equal number of indexers")
+		}
+	}
+
+	var out []byte
+	var val []byte
+	var err error
+	for i, idx := range c.Indexes {
+		if argCount >= len(args) {
+			// We're done; should only hit this if AllowMissing
+			break
+		}
+		if _, ok := idx.(*StringMapFieldIndex); ok {
+			if args[argCount+1] == nil {
+				val, err = idx.FromArgs(args[argCount])
+			} else {
+				val, err = idx.FromArgs(args[argCount : argCount+2]...)
+			}
+			argCount += 2
+		} else {
+			val, err = idx.FromArgs(args[argCount])
+			argCount++
+		}
+		if err != nil {
+			return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+		}
+		out = append(out, val...)
+	}
+	return out, nil
+}
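A hedged sketch of wiring up CompoundMultiIndex with AllowMissing as described in its comment; the index name and the Name/Tags fields are illustrative only:

```go
// A non-unique index over Name plus each entry of a Tags []string field.
nameTag := &memdb.IndexSchema{
	Name:   "name_tag",
	Unique: false,
	Indexer: &memdb.CompoundMultiIndex{
		AllowMissing: true,
		Indexes: []memdb.Indexer{
			&memdb.StringFieldIndex{Field: "Name"},
			&memdb.StringSliceFieldIndex{Field: "Tags"},
		},
	},
}
_ = nameTag // add it to the table's Indexes map under "name_tag"

// Lookup with both parts, or (because AllowMissing is set) with Name only:
//   it, err := txn.Get("people", "name_tag", "Joe", "admin")
//   it, err := txn.Get("people", "name_tag", "Joe")
```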

+ 37 - 18
vendor/github.com/hashicorp/go-memdb/memdb.go

@@ -1,3 +1,5 @@
+// Package memdb provides an in-memory database that supports transactions
+// and MVCC.
 package memdb
 package memdb
 
 
 import (
 import (
@@ -8,19 +10,29 @@ import (
 	"github.com/hashicorp/go-immutable-radix"
 	"github.com/hashicorp/go-immutable-radix"
 )
 )
 
 
-// MemDB is an in-memory database. It provides a table abstraction,
-// which is used to store objects (rows) with multiple indexes based
-// on values. The database makes use of immutable radix trees to provide
-// transactions and MVCC.
+// MemDB is an in-memory database providing Atomicity, Consistency, and
+// Isolation from ACID. MemDB doesn't provide Durability since it is an
+// in-memory database.
+//
+// MemDB provides a table abstraction to store objects (rows) with multiple
+// indexes based on inserted values. The database makes use of immutable radix
+// trees to provide transactions and MVCC.
+//
+// Objects inserted into MemDB are not copied. It is **extremely important**
+// that objects are not modified in-place after they are inserted since they
+// are stored directly in MemDB. It remains unsafe to modify inserted objects
+// even after they've been deleted from MemDB since there may still be older
+// snapshots of the DB being read from other goroutines.
 type MemDB struct {
 type MemDB struct {
-	schema *DBSchema
-	root   unsafe.Pointer // *iradix.Tree underneath
+	schema  *DBSchema
+	root    unsafe.Pointer // *iradix.Tree underneath
+	primary bool
 
 
-	// There can only be a single writter at once
+	// There can only be a single writer at once
 	writer sync.Mutex
 	writer sync.Mutex
 }
 }
 
 
-// NewMemDB creates a new MemDB with the given schema
+// NewMemDB creates a new MemDB with the given schema.
 func NewMemDB(schema *DBSchema) (*MemDB, error) {
 func NewMemDB(schema *DBSchema) (*MemDB, error) {
 	// Validate the schema
 	// Validate the schema
 	if err := schema.Validate(); err != nil {
 	if err := schema.Validate(); err != nil {
@@ -29,12 +41,14 @@ func NewMemDB(schema *DBSchema) (*MemDB, error) {
 
 
 	// Create the MemDB
 	// Create the MemDB
 	db := &MemDB{
 	db := &MemDB{
-		schema: schema,
-		root:   unsafe.Pointer(iradix.New()),
+		schema:  schema,
+		root:    unsafe.Pointer(iradix.New()),
+		primary: true,
 	}
 	}
 	if err := db.initialize(); err != nil {
 	if err := db.initialize(); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+
 	return db, nil
 	return db, nil
 }
 }
 
 
@@ -44,7 +58,7 @@ func (db *MemDB) getRoot() *iradix.Tree {
 	return root
 	return root
 }
 }
 
 
-// Txn is used to start a new transaction, in either read or write mode.
+// Txn is used to start a new transaction in either read or write mode.
 // There can only be a single concurrent writer, but any number of readers.
 // There can only be a single concurrent writer, but any number of readers.
 func (db *MemDB) Txn(write bool) *Txn {
 func (db *MemDB) Txn(write bool) *Txn {
 	if write {
 	if write {
@@ -58,22 +72,27 @@ func (db *MemDB) Txn(write bool) *Txn {
 	return txn
 	return txn
 }
 }
 
 
-// Snapshot is used to capture a point-in-time snapshot
-// of the database that will not be affected by any write
-// operations to the existing DB.
+// Snapshot is used to capture a point-in-time snapshot  of the database that
+// will not be affected by any write operations to the existing DB.
+//
+// If MemDB is storing reference-based values (pointers, maps, slices, etc.),
+// the Snapshot will not deep copy those values. Therefore, it is still unsafe
+// to modify any inserted values in either DB.
 func (db *MemDB) Snapshot() *MemDB {
 func (db *MemDB) Snapshot() *MemDB {
 	clone := &MemDB{
 	clone := &MemDB{
-		schema: db.schema,
-		root:   unsafe.Pointer(db.getRoot()),
+		schema:  db.schema,
+		root:    unsafe.Pointer(db.getRoot()),
+		primary: false,
 	}
 	}
 	return clone
 	return clone
 }
 }
 
 
-// initialize is used to setup the DB for use after creation
+// initialize is used to setup the DB for use after creation. This should
+// be called only once after allocating a MemDB.
 func (db *MemDB) initialize() error {
 func (db *MemDB) initialize() error {
 	root := db.getRoot()
 	root := db.getRoot()
 	for tName, tableSchema := range db.schema.Tables {
 	for tName, tableSchema := range db.schema.Tables {
-		for iName, _ := range tableSchema.Indexes {
+		for iName := range tableSchema.Indexes {
 			index := iradix.New()
 			index := iradix.New()
 			path := indexPath(tName, iName)
 			path := indexPath(tName, iName)
 			root, _, _ = root.Insert(path, index)
 			root, _, _ = root.Insert(path, index)
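The Snapshot semantics documented above (point-in-time view, no deep copy) in a short sketch, again reusing the README's Person table:

```go
snap := db.Snapshot() // point-in-time, read-only view

// Writes to db after this point are not visible through snap.
wtxn := db.Txn(true)
if err := wtxn.Insert("person", &Person{"new@aol.com", "New", 40}); err != nil {
	panic(err)
}
wtxn.Commit()

ro := snap.Txn(false)
defer ro.Abort()
raw, _ := ro.First("person", "id", "new@aol.com")
fmt.Println(raw == nil) // true: the insert happened after the snapshot
```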

+ 42 - 13
vendor/github.com/hashicorp/go-memdb/schema.go

@@ -2,33 +2,47 @@ package memdb
 
 
 import "fmt"
 import "fmt"
 
 
-// DBSchema contains the full database schema used for MemDB
+// DBSchema is the schema to use for the full database with a MemDB instance.
+//
+// MemDB will require a valid schema. Schema validation can be tested using
+// the Validate function. Calling this function is recommended in unit tests.
 type DBSchema struct {
 type DBSchema struct {
+	// Tables is the set of tables within this database. The key is the
+	// table name and must match the Name in TableSchema.
 	Tables map[string]*TableSchema
 	Tables map[string]*TableSchema
 }
 }
 
 
-// Validate is used to validate the database schema
+// Validate validates the schema.
 func (s *DBSchema) Validate() error {
 func (s *DBSchema) Validate() error {
 	if s == nil {
 	if s == nil {
-		return fmt.Errorf("missing schema")
+		return fmt.Errorf("schema is nil")
 	}
 	}
+
 	if len(s.Tables) == 0 {
 	if len(s.Tables) == 0 {
-		return fmt.Errorf("no tables defined")
+		return fmt.Errorf("schema has no tables defined")
 	}
 	}
+
 	for name, table := range s.Tables {
 	for name, table := range s.Tables {
 		if name != table.Name {
 		if name != table.Name {
 			return fmt.Errorf("table name mis-match for '%s'", name)
 			return fmt.Errorf("table name mis-match for '%s'", name)
 		}
 		}
+
 		if err := table.Validate(); err != nil {
 		if err := table.Validate(); err != nil {
-			return err
+			return fmt.Errorf("table %q: %s", name, err)
 		}
 		}
 	}
 	}
+
 	return nil
 	return nil
 }
 }
 
 
-// TableSchema contains the schema for a single table
+// TableSchema is the schema for a single table.
 type TableSchema struct {
 type TableSchema struct {
-	Name    string
+	// Name of the table. This must match the key in the Tables map in DBSchema.
+	Name string
+
+	// Indexes is the set of indexes for querying this table. The key
+	// is a unique name for the index and must match the Name in the
+	// IndexSchema.
 	Indexes map[string]*IndexSchema
 	Indexes map[string]*IndexSchema
 }
 }
 
 
@@ -37,35 +51,50 @@ func (s *TableSchema) Validate() error {
 	if s.Name == "" {
 	if s.Name == "" {
 		return fmt.Errorf("missing table name")
 		return fmt.Errorf("missing table name")
 	}
 	}
+
 	if len(s.Indexes) == 0 {
 	if len(s.Indexes) == 0 {
-		return fmt.Errorf("missing table schemas for '%s'", s.Name)
+		return fmt.Errorf("missing table indexes for '%s'", s.Name)
 	}
 	}
+
 	if _, ok := s.Indexes["id"]; !ok {
 	if _, ok := s.Indexes["id"]; !ok {
 		return fmt.Errorf("must have id index")
 		return fmt.Errorf("must have id index")
 	}
 	}
+
 	if !s.Indexes["id"].Unique {
 	if !s.Indexes["id"].Unique {
 		return fmt.Errorf("id index must be unique")
 		return fmt.Errorf("id index must be unique")
 	}
 	}
+
 	if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
 	if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
 		return fmt.Errorf("id index must be a SingleIndexer")
 		return fmt.Errorf("id index must be a SingleIndexer")
 	}
 	}
+
 	for name, index := range s.Indexes {
 	for name, index := range s.Indexes {
 		if name != index.Name {
 		if name != index.Name {
 			return fmt.Errorf("index name mis-match for '%s'", name)
 			return fmt.Errorf("index name mis-match for '%s'", name)
 		}
 		}
+
 		if err := index.Validate(); err != nil {
 		if err := index.Validate(); err != nil {
-			return err
+			return fmt.Errorf("index %q: %s", name, err)
 		}
 		}
 	}
 	}
+
 	return nil
 	return nil
 }
 }
 
 
-// IndexSchema contains the schema for an index
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
 type IndexSchema struct {
 type IndexSchema struct {
-	Name         string
+	// Name of the index. This must be unique among a tables set of indexes.
+	// This must match the key in the map of Indexes for a TableSchema.
+	Name string
+
+	// AllowMissing if true ignores this index if it doesn't produce a
+	// value. For example, an index that extracts a field that doesn't
+	// exist from a structure.
 	AllowMissing bool
 	AllowMissing bool
-	Unique       bool
-	Indexer      Indexer
+
+	Unique  bool
+	Indexer Indexer
 }
 }
 
 
 func (s *IndexSchema) Validate() error {
 func (s *IndexSchema) Validate() error {

+ 494 - 34
vendor/github.com/hashicorp/go-memdb/txn.go

@@ -7,13 +7,18 @@ import (
 	"sync/atomic"
 	"sync/atomic"
 	"unsafe"
 	"unsafe"
 
 
-	"github.com/hashicorp/go-immutable-radix"
+	iradix "github.com/hashicorp/go-immutable-radix"
 )
 )
 
 
 const (
 const (
 	id = "id"
 	id = "id"
 )
 )
 
 
+var (
+	// ErrNotFound is returned when the requested item is not found
+	ErrNotFound = fmt.Errorf("not found")
+)
+
 // tableIndex is a tuple of (Table, Index) used for lookups
 // tableIndex is a tuple of (Table, Index) used for lookups
 type tableIndex struct {
 type tableIndex struct {
 	Table string
 	Table string
@@ -28,19 +33,35 @@ type Txn struct {
 	rootTxn *iradix.Txn
 	rootTxn *iradix.Txn
 	after   []func()
 	after   []func()
 
 
+	// changes is used to track the changes performed during the transaction. If
+	// it is nil at transaction start then changes are not tracked.
+	changes Changes
+
 	modified map[tableIndex]*iradix.Txn
 	modified map[tableIndex]*iradix.Txn
 }
 }
 
 
-// readableIndex returns a transaction usable for reading the given
-// index in a table. If a write transaction is in progress, we may need
-// to use an existing modified txn.
+// TrackChanges enables change tracking for the transaction. If called at any
+// point before commit, subsequent mutations will be recorded and can be
+// retrieved using ChangeSet. Once this has been called on a transaction it
+// can't be unset. As with other Txn methods it's not safe to call this from a
+// different goroutine than the one making mutations or committing the
+// transaction.
+func (txn *Txn) TrackChanges() {
+	if txn.changes == nil {
+		txn.changes = make(Changes, 0, 1)
+	}
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
 func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
 func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
 	// Look for existing transaction
 	// Look for existing transaction
 	if txn.write && txn.modified != nil {
 	if txn.write && txn.modified != nil {
 		key := tableIndex{table, index}
 		key := tableIndex{table, index}
 		exist, ok := txn.modified[key]
 		exist, ok := txn.modified[key]
 		if ok {
 		if ok {
-			return exist
+			return exist.Clone()
 		}
 		}
 	}
 	}
 
 
@@ -70,6 +91,11 @@ func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
 	raw, _ := txn.rootTxn.Get(path)
 	raw, _ := txn.rootTxn.Get(path)
 	indexTxn := raw.(*iradix.Tree).Txn()
 	indexTxn := raw.(*iradix.Tree).Txn()
 
 
+	// If we are the primary DB, enable mutation tracking. Snapshots should
+	// not notify, otherwise we will trigger watches on the primary DB when
+	// the writes will not be visible.
+	indexTxn.TrackMutate(txn.db.primary)
+
 	// Keep this open for the duration of the txn
 	// Keep this open for the duration of the txn
 	txn.modified[key] = indexTxn
 	txn.modified[key] = indexTxn
 	return indexTxn
 	return indexTxn
@@ -91,6 +117,7 @@ func (txn *Txn) Abort() {
 	// Clear the txn
 	// Clear the txn
 	txn.rootTxn = nil
 	txn.rootTxn = nil
 	txn.modified = nil
 	txn.modified = nil
+	txn.changes = nil
 
 
 	// Release the writer lock since this is invalid
 	// Release the writer lock since this is invalid
 	txn.db.writer.Unlock()
 	txn.db.writer.Unlock()
@@ -112,14 +139,23 @@ func (txn *Txn) Commit() {
 	// Commit each sub-transaction scoped to (table, index)
 	// Commit each sub-transaction scoped to (table, index)
 	for key, subTxn := range txn.modified {
 	for key, subTxn := range txn.modified {
 		path := indexPath(key.Table, key.Index)
 		path := indexPath(key.Table, key.Index)
-		final := subTxn.Commit()
+		final := subTxn.CommitOnly()
 		txn.rootTxn.Insert(path, final)
 		txn.rootTxn.Insert(path, final)
 	}
 	}
 
 
 	// Update the root of the DB
 	// Update the root of the DB
-	newRoot := txn.rootTxn.Commit()
+	newRoot := txn.rootTxn.CommitOnly()
 	atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
 	atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
 
 
+	// Now issue all of the mutation updates (this is safe to call
+	// even if mutation tracking isn't enabled); we do this after
+	// the root pointer is swapped so that waking responders will
+	// see the new state.
+	for _, subTxn := range txn.modified {
+		subTxn.Notify()
+	}
+	txn.rootTxn.Notify()
+
 	// Clear the txn
 	// Clear the txn
 	txn.rootTxn = nil
 	txn.rootTxn = nil
 	txn.modified = nil
 	txn.modified = nil
@@ -134,7 +170,11 @@ func (txn *Txn) Commit() {
 	}
 	}
 }
 }
 
 
-// Insert is used to add or update an object into the given table
+// Insert is used to add or update an object into the given table.
+//
+// When updating an object, the obj provided should be a copy rather
+// than a value updated in-place. Modifying values in-place that are already
+// inserted into MemDB is not supported behavior.
 func (txn *Txn) Insert(table string, obj interface{}) error {
 func (txn *Txn) Insert(table string, obj interface{}) error {
 	if !txn.write {
 	if !txn.write {
 		return fmt.Errorf("cannot insert in read-only transaction")
 		return fmt.Errorf("cannot insert in read-only transaction")
@@ -246,11 +286,19 @@ func (txn *Txn) Insert(table string, obj interface{}) error {
 			indexTxn.Insert(val, obj)
 			indexTxn.Insert(val, obj)
 		}
 		}
 	}
 	}
+	if txn.changes != nil {
+		txn.changes = append(txn.changes, Change{
+			Table:      table,
+			Before:     existing, // might be nil on a create
+			After:      obj,
+			primaryKey: idVal,
+		})
+	}
 	return nil
 	return nil
 }
 }
 
 
-// Delete is used to delete a single object from the given table
-// This object must already exist in the table
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
 func (txn *Txn) Delete(table string, obj interface{}) error {
 func (txn *Txn) Delete(table string, obj interface{}) error {
 	if !txn.write {
 	if !txn.write {
 		return fmt.Errorf("cannot delete in read-only transaction")
 		return fmt.Errorf("cannot delete in read-only transaction")
@@ -277,7 +325,7 @@ func (txn *Txn) Delete(table string, obj interface{}) error {
 	idTxn := txn.writableIndex(table, id)
 	idTxn := txn.writableIndex(table, id)
 	existing, ok := idTxn.Get(idVal)
 	existing, ok := idTxn.Get(idVal)
 	if !ok {
 	if !ok {
-		return fmt.Errorf("not found")
+		return ErrNotFound
 	}
 	}
 
 
 	// Remove the object from all the indexes
 	// Remove the object from all the indexes
@@ -313,9 +361,121 @@ func (txn *Txn) Delete(table string, obj interface{}) error {
 			}
 			}
 		}
 		}
 	}
 	}
+	if txn.changes != nil {
+		txn.changes = append(txn.changes, Change{
+			Table:      table,
+			Before:     existing,
+			After:      nil, // Now nil indicates deletion
+			primaryKey: idVal,
+		})
+	}
 	return nil
 	return nil
 }
 }
 
 
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform a scan and enumerate the set of objects to delete.
+// These will be removed from all other indexes, and then a special prefix operation will delete the objects from the given index in an efficient subtree delete operation.
+// This is useful when you have a very large number of objects indexed by the given index, along with a much smaller number of entries in the other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) {
+	if !txn.write {
+		return false, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	if !strings.HasSuffix(prefix_index, "_prefix") {
+		return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index)
+	}
+
+	deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix")
+
+	// Get an iterator over all of the keys with the given prefix.
+	entries, err := txn.Get(table, prefix_index, prefix)
+	if err != nil {
+		return false, fmt.Errorf("failed kvs lookup: %s", err)
+	}
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return false, fmt.Errorf("invalid table '%s'", table)
+	}
+
+	foundAny := false
+	for entry := entries.Next(); entry != nil; entry = entries.Next() {
+		if !foundAny {
+			foundAny = true
+		}
+		// Get the primary ID of the object
+		idSchema := tableSchema.Indexes[id]
+		idIndexer := idSchema.Indexer.(SingleIndexer)
+		ok, idVal, err := idIndexer.FromObject(entry)
+		if err != nil {
+			return false, fmt.Errorf("failed to build primary index: %v", err)
+		}
+		if !ok {
+			return false, fmt.Errorf("object missing primary index")
+		}
+		if txn.changes != nil {
+			// Record the deletion
+			idTxn := txn.writableIndex(table, id)
+			existing, ok := idTxn.Get(idVal)
+			if ok {
+				txn.changes = append(txn.changes, Change{
+					Table:      table,
+					Before:     existing,
+					After:      nil, // Now nil indicates deletion
+					primaryKey: idVal,
+				})
+			}
+		}
+		// Remove the object from all the indexes except the given prefix index
+		for name, indexSchema := range tableSchema.Indexes {
+			if name == deletePrefixIndex {
+				continue
+			}
+			indexTxn := txn.writableIndex(table, name)
+
+			// Handle the update by deleting from the index first
+			var (
+				ok   bool
+				vals [][]byte
+				err  error
+			)
+			switch indexer := indexSchema.Indexer.(type) {
+			case SingleIndexer:
+				var val []byte
+				ok, val, err = indexer.FromObject(entry)
+				vals = [][]byte{val}
+			case MultiIndexer:
+				ok, vals, err = indexer.FromObject(entry)
+			}
+			if err != nil {
+				return false, fmt.Errorf("failed to build index '%s': %v", name, err)
+			}
+
+			if ok {
+				// Handle non-unique index by computing a unique index.
+				// This is done by appending the primary key which must
+				// be unique anyways.
+				for _, val := range vals {
+					if !indexSchema.Unique {
+						val = append(val, idVal...)
+					}
+					indexTxn.Delete(val)
+				}
+			}
+		}
+
+	}
+	if foundAny {
+		indexTxn := txn.writableIndex(table, deletePrefixIndex)
+		ok = indexTxn.DeletePrefix([]byte(prefix))
+		if !ok {
+			panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix))
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
 // DeleteAll is used to delete all the objects in a given table
 // DeleteAll is used to delete all the objects in a given table
 // matching the constraints on the index
 // matching the constraints on the index
 func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
 func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
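A sketch of the new DeletePrefix call against the README's Person table, whose primary key is the email address; the index argument must carry the "_prefix" suffix as enforced above:

```go
txn := db.Txn(true)

// Remove every person whose primary key (email) starts with "joe".
// "id_prefix" addresses the "id" index in prefix mode.
deleted, err := txn.DeletePrefix("person", "id_prefix", "joe")
if err != nil {
	panic(err)
}
fmt.Println("anything deleted:", deleted)

txn.Commit()
```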
@@ -352,13 +512,13 @@ func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error)
 	return num, nil
 	return num, nil
 }
 }
 
 
-// First is used to return the first matching object for
-// the given constraints on the index
-func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+// FirstWatch is used to return the first matching object for
+// the given constraints on the index along with the watch channel
+func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
 	// Get the index value
 	// Get the index value
 	indexSchema, val, err := txn.getIndexValue(table, index, args...)
 	indexSchema, val, err := txn.getIndexValue(table, index, args...)
 	if err != nil {
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	}
 
 
 	// Get the index itself
 	// Get the index itself
@@ -366,18 +526,60 @@ func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, er
 
 
 	// Do an exact lookup
 	// Do an exact lookup
 	if indexSchema.Unique && val != nil && indexSchema.Name == index {
 	if indexSchema.Unique && val != nil && indexSchema.Name == index {
-		obj, ok := indexTxn.Get(val)
+		watch, obj, ok := indexTxn.GetWatch(val)
 		if !ok {
 		if !ok {
-			return nil, nil
+			return watch, nil, nil
 		}
 		}
-		return obj, nil
+		return watch, obj, nil
 	}
 	}
 
 
 	// Handle non-unique index by using an iterator and getting the first value
 	// Handle non-unique index by using an iterator and getting the first value
 	iter := indexTxn.Root().Iterator()
 	iter := indexTxn.Root().Iterator()
-	iter.SeekPrefix(val)
+	watch := iter.SeekPrefixWatch(val)
 	_, value, _ := iter.Next()
 	_, value, _ := iter.Next()
-	return value, nil
+	return watch, value, nil
+}
+
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel
+func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+	// Get the index value
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+	// Do an exact lookup
+	if indexSchema.Unique && val != nil && indexSchema.Name == index {
+		watch, obj, ok := indexTxn.GetWatch(val)
+		if !ok {
+			return watch, nil, nil
+		}
+		return watch, obj, nil
+	}
+
+	// Handle non-unique index by using an iterator and getting the last value
+	iter := indexTxn.Root().ReverseIterator()
+	watch := iter.SeekPrefixWatch(val)
+	_, value, _ := iter.Previous()
+	return watch, value, nil
+}
+
+// First is used to return the first matching object for
+// the given constraints on the index
+func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+	_, val, err := txn.FirstWatch(table, index, args...)
+	return val, err
+}
+
+// Last is used to return the last matching object for
+// the given constraints on the index
+func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
+	_, val, err := txn.LastWatch(table, index, args...)
+	return val, err
 }
 }
 
 
 // LongestPrefix is used to fetch the longest prefix match for the given
 // LongestPrefix is used to fetch the longest prefix match for the given
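FirstWatch pairs a lookup with a channel that is closed when the result may have changed; a brief sketch against the README's Person table:

```go
txn := db.Txn(false)
defer txn.Abort()

watch, raw, err := txn.FirstWatch("person", "id", "joe@aol.com")
if err != nil {
	panic(err)
}
fmt.Println("current value:", raw)

// Later, block until this particular result may be stale, then re-query.
<-watch
fmt.Println("joe@aol.com changed; run FirstWatch again")
```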
@@ -465,30 +667,100 @@ func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexS
 	return indexSchema, val, err
 	return indexSchema, val, err
 }
 }
 
 
-// ResultIterator is used to iterate over a list of results
-// from a Get query on a table.
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
 type ResultIterator interface {
 type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	// Next returns the next result from the iterator. If there are no more results
+	// nil is returned.
 	Next() interface{}
 	Next() interface{}
 }
 }
 
 
-// Get is used to construct a ResultIterator over all the
-// rows that match the given constraints of an index.
+// Get is used to construct a ResultIterator over all the rows that match the
+// given constraints of an index. The index values must match exactly (this
+// is not a range-based or prefix-based lookup) by default.
+//
+// Prefix lookups: if the named index implements PrefixIndexer, you may perform
+// prefix-based lookups by appending "_prefix" to the index name. In this
+// scenario, the index values given in args are treated as prefix lookups. For
+// example, a StringFieldIndex will match any string with the given value
+// as a prefix: "mem" matches "memdb".
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
 func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
 func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
-	// Get the index value to scan
-	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	// Get the index itself
-	indexTxn := txn.readableIndex(table, indexSchema.Name)
-	indexRoot := indexTxn.Root()
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
 
 
-	// Get an interator over the index
-	indexIter := indexRoot.Iterator()
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index.
+// The returned ResultIterator's Next() yields rows in descending index order.
+//
+// See the documentation on Get for details on arguments.
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
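For orientation, here is a minimal, self-contained sketch (not part of this change) of how client code drives the reworked Get path, including a "_prefix" lookup. The Person type, the "person" table, and the schema below are illustrative assumptions only:

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

// Person is a throwaway example type; only the ID field is indexed.
type Person struct {
	ID   string
	Name string
}

func main() {
	// One table, keyed by a unique string index over the ID field.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"person": {
				Name: "person",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	write := db.Txn(true)
	for _, p := range []*Person{{ID: "alice"}, {ID: "alfred"}, {ID: "bob"}} {
		if err := write.Insert("person", p); err != nil {
			panic(err)
		}
	}
	write.Commit()

	read := db.Txn(false)
	defer read.Abort()

	// Exact-match lookup on the "id" index; at most one row here because
	// the index is unique.
	it, err := read.Get("person", "id", "alice")
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println("exact:", obj.(*Person).ID)
	}

	// Appending "_prefix" switches to a prefix lookup, so "al" matches
	// both "alice" and "alfred".
	it, err = read.Get("person", "id_prefix", "al")
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println("prefix:", obj.(*Person).ID)
	}
}

Because the iterator is built from a snapshot of the index, rows inserted or deleted through the same write transaction after the iterator was created are not observed by later Next calls, matching the ResultIterator contract documented above; swapping Get for GetReverse in the prefix query would return the same rows in descending order.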
+// LowerBound is used to construct a ResultIterator over the range of rows
+// that have an index value greater than or equal to the provided args.
+// Calling this and then iterating until the rows are larger than required
+// allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The returned WatchCh will be nil and so will block
+// forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}

 	// Seek the iterator to the appropriate sub-set
-	indexIter.SeekPrefix(val)
+	indexIter.SeekLowerBound(val)

 	// Create an iterator
 	iter := &radixIterator{
@@ -497,6 +769,149 @@ func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, e
 	return iter, nil
 }

+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the provided
+// args. Calling this and then iterating until the rows are lower than required
+// allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The returned WatchCh will be nil and so will block
+// forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekReverseLowerBound(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. It's only converted to a string to make it comparable
+// so this struct can be used as a map index.
+type objectID struct {
+	Table    string
+	IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+	firstBefore interface{}
+	lastIdx     int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+	if txn.changes == nil {
+		return nil
+	}
+
+	// De-duplicate mutations by key so all take effect at the point of the last
+	// write but we keep the mutations in order.
+	dups := make(map[objectID]mutInfo)
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		// Store the latest mutation index for each key value
+		mi, ok := dups[oid]
+		if !ok {
+			// First entry for key, store the before value
+			mi.firstBefore = m.Before
+		}
+		mi.lastIdx = i
+		dups[oid] = mi
+	}
+	if len(dups) == len(txn.changes) {
+		// No duplicates found, fast path return it as is
+		return txn.changes
+	}
+
+	// Need to remove the duplicates
+	cs := make(Changes, 0, len(dups))
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		mi := dups[oid]
+		if mi.lastIdx == i {
+			// This was the latest value for this key copy it with the before value in
+			// case it's different. Note that m is not a pointer so we are not
+			// modifying the txn.changeSet here - it's already a copy.
+			m.Before = mi.firstBefore
+
+			// Edge case - if the object was inserted and then eventually deleted in
+			// the same transaction, then the net effect on that key is a no-op. Don't
+			// emit a mutation with nil for before and after as it's meaningless and
+			// might violate expectations and cause a panic in code that assumes at
+			// least one must be set.
+			if m.Before == nil && m.After == nil {
+				continue
+			}
+			cs = append(cs, m)
+		}
+	}
+	// Store the de-duped version in case this is called again
+	txn.changes = cs
+	return cs
+}
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+	return indexIter, val, nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.ReverseIterator()
+	return indexIter, val, nil
+}
+
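Continuing the same illustrative schema, the fragment below sketches a LowerBound range scan together with the new change tracking. It assumes tracking is switched on with the transaction's TrackChanges method and that the Created/Updated/Deleted helpers come from the changes.go file added in this bump, so treat it as a sketch rather than a verbatim API reference:

// rangeAndChanges continues the sketch above: same package, imports,
// Person type, and "person" schema.
func rangeAndChanges(db *memdb.DB) {
	txn := db.Txn(true)
	txn.TrackChanges() // assumption: enables the Changes() bookkeeping

	// Two writes to the same primary key; Changes() collapses them into a
	// single entry whose Before is nil (the row did not exist before the
	// transaction) and whose After is the last value written.
	_ = txn.Insert("person", &Person{ID: "carol"})
	_ = txn.Insert("person", &Person{ID: "carol", Name: "Carol"})

	// Ascending range scan: everything with ID >= "bob". The caller owns the
	// upper bound, so stop once rows leave the wanted range.
	it, err := txn.LowerBound("person", "id", "bob")
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		p := obj.(*Person)
		if p.ID >= "d" {
			break
		}
		fmt.Println("range:", p.ID)
	}

	// One collapsed change per object; helpers assumed from changes.go.
	for _, ch := range txn.Changes() {
		fmt.Printf("change: table=%s created=%v updated=%v deleted=%v\n",
			ch.Table, ch.Created(), ch.Updated(), ch.Deleted())
	}
	txn.Commit()
}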
 // Defer is used to push a new arbitrary function onto a stack which
 // gets called when a transaction is committed and finished. Deferred
 // functions are called in LIFO order, and only invoked at the end of
@@ -506,10 +921,15 @@ func (txn *Txn) Defer(fn func()) {
 }

 // radixIterator is used to wrap an underlying iradix iterator.
-// This is much mroe efficient than a sliceIterator as we are not
+// This is much more efficient than a sliceIterator as we are not
 // materializing the entire view.
 type radixIterator struct {
-	iter *iradix.Iterator
+	iter    *iradix.Iterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
 }

 func (r *radixIterator) Next() interface{} {
@@ -519,3 +939,43 @@ func (r *radixIterator) Next() interface{} {
 	}
 	return value
 }
+
+type radixReverseIterator struct {
+	iter    *iradix.ReverseIterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() interface{} {
+	_, value, ok := r.iter.Previous()
+	if !ok {
+		return nil
+	}
+	return value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+	if txn.rootTxn == nil {
+		return nil
+	}
+
+	snapshot := &Txn{
+		db:      txn.db,
+		rootTxn: txn.rootTxn.Clone(),
+	}
+
+	// Commit sub-transactions into the snapshot
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		snapshot.rootTxn.Insert(path, final)
+	}
+
+	return snapshot
+}
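As a hedged illustration of the new Snapshot method, again reusing the illustrative schema: a snapshot taken from a live write transaction already sees that transaction's uncommitted writes and is unaffected by a later Abort.

// snapshotExample continues the sketch above: same package, imports, and schema.
func snapshotExample(db *memdb.DB) {
	txn := db.Txn(true)
	_ = txn.Insert("person", &Person{ID: "dave"})

	// The snapshot is a read-only view built from cloned index state, so the
	// pending insert of "dave" is visible even though txn has not committed.
	snap := txn.Snapshot()
	if obj, err := snap.First("person", "id", "dave"); err == nil && obj != nil {
		fmt.Println("visible in snapshot:", obj.(*Person).ID)
	}

	// Abandoning the original transaction does not affect the snapshot.
	txn.Abort()
}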

+ 144 - 0
vendor/github.com/hashicorp/go-memdb/watch.go

@@ -0,0 +1,144 @@
+package memdb
+
+import (
+	"context"
+	"time"
+)
+
+// WatchSet is a collection of watch channels.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+	return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends a watchCh to the WatchSet if non-nil.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+
+	if _, ok := w[watchCh]; !ok {
+		w[watchCh] = struct{}{}
+	}
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. It's expected that the altCh will be the same on many calls to this
+// function, so you will exceed the soft limit a little bit if you hit this, but
+// not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from the start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// This is safe for a nil WatchSet so we don't need to check that here.
+	if len(w) < softLimit {
+		w.Add(watchCh)
+	} else {
+		w.Add(altCh)
+	}
+}
+
+// Watch is used to wait for either the watch set to trigger or a timeout.
+// Returns true on timeout.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+	if w == nil {
+		return false
+	}
+
+	// Create a context that gets cancelled when the timeout is triggered
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	go func() {
+		select {
+		case <-timeoutCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+
+	return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx is used to wait for either the watch set to trigger or for the
+// context to be cancelled. Watch with a timeout channel can be mimicked by
+// creating a context with a deadline. WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+	if w == nil {
+		return nil
+	}
+
+	if n := len(w); n <= aFew {
+		idx := 0
+		chunk := make([]<-chan struct{}, aFew)
+		for watchCh := range w {
+			chunk[idx] = watchCh
+			idx++
+		}
+		return watchFew(ctx, chunk)
+	}
+
+	return w.watchMany(ctx)
+}
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+	// Set up a goroutine for each watcher.
+	triggerCh := make(chan struct{}, 1)
+	watcher := func(chunk []<-chan struct{}) {
+		if err := watchFew(ctx, chunk); err == nil {
+			select {
+			case triggerCh <- struct{}{}:
+			default:
+			}
+		}
+	}
+
+	// Apportion the watch channels into chunks we can feed into the
+	// watchFew helper.
+	idx := 0
+	chunk := make([]<-chan struct{}, aFew)
+	for watchCh := range w {
+		subIdx := idx % aFew
+		chunk[subIdx] = watchCh
+		idx++
+
+		// Fire off this chunk and start a fresh one.
+		if idx%aFew == 0 {
+			go watcher(chunk)
+			chunk = make([]<-chan struct{}, aFew)
+		}
+	}
+
+	// Make sure to watch any residual channels in the last chunk.
+	if idx%aFew != 0 {
+		go watcher(chunk)
+	}
+
+	// Wait for a channel to trigger or timeout.
+	select {
+	case <-triggerCh:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// WatchCh returns a channel that is used to wait for either the watch set to trigger
+// or for the context to be cancelled. WatchCh creates a new goroutine each call, so
+// callers may need to cache the returned channel to avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+	// Create the outgoing channel
+	triggerCh := make(chan error, 1)
+
+	// Create a goroutine to collect the error from WatchCtx
+	go func() {
+		triggerCh <- w.WatchCtx(ctx)
+	}()
+
+	return triggerCh
+}
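The WatchSet plumbing above is what enables blocking queries: collect the WatchCh of each iterator you care about, then block until the watched part of the index changes or the deadline passes. A sketch reusing the illustrative schema, assuming the additional "context" and "time" imports:

// waitForPeople continues the sketch above; it re-runs the prefix query each
// time the watched part of the index changes, until the timeout fires.
func waitForPeople(db *memdb.DB) error {
	for {
		txn := db.Txn(false)
		it, err := txn.Get("person", "id_prefix", "al")
		if err != nil {
			txn.Abort()
			return err
		}

		// Watch the sub-tree the iterator was seeked to.
		ws := memdb.NewWatchSet()
		ws.Add(it.WatchCh())

		for obj := it.Next(); obj != nil; obj = it.Next() {
			fmt.Println("current:", obj.(*Person).ID)
		}
		txn.Abort()

		// Block until something under the "al" prefix changes, or give up
		// after ten seconds.
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		err = ws.WatchCtx(ctx)
		cancel()
		if err != nil {
			return err // deadline exceeded or cancelled: nothing changed
		}
		// A watch fired; loop and re-read the current state.
	}
}

WatchCtx handles the select fan-out internally (chunking channels and spawning goroutines when the set is large), so callers never deal with that machinery directly.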

+ 117 - 0
vendor/github.com/hashicorp/go-memdb/watch_few.go

@@ -0,0 +1,117 @@
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+	"context"
+)
+
+// aFew gives how many watchers this function is wired to support. You must
+// always pass a full slice of this length, but unused channels can be nil.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
+func watchFew(ctx context.Context, ch []<-chan struct{}) error {
+	select {
+
+	case <-ch[0]:
+		return nil
+
+	case <-ch[1]:
+		return nil
+
+	case <-ch[2]:
+		return nil
+
+	case <-ch[3]:
+		return nil
+
+	case <-ch[4]:
+		return nil
+
+	case <-ch[5]:
+		return nil
+
+	case <-ch[6]:
+		return nil
+
+	case <-ch[7]:
+		return nil
+
+	case <-ch[8]:
+		return nil
+
+	case <-ch[9]:
+		return nil
+
+	case <-ch[10]:
+		return nil
+
+	case <-ch[11]:
+		return nil
+
+	case <-ch[12]:
+		return nil
+
+	case <-ch[13]:
+		return nil
+
+	case <-ch[14]:
+		return nil
+
+	case <-ch[15]:
+		return nil
+
+	case <-ch[16]:
+		return nil
+
+	case <-ch[17]:
+		return nil
+
+	case <-ch[18]:
+		return nil
+
+	case <-ch[19]:
+		return nil
+
+	case <-ch[20]:
+		return nil
+
+	case <-ch[21]:
+		return nil
+
+	case <-ch[22]:
+		return nil
+
+	case <-ch[23]:
+		return nil
+
+	case <-ch[24]:
+		return nil
+
+	case <-ch[25]:
+		return nil
+
+	case <-ch[26]:
+		return nil
+
+	case <-ch[27]:
+		return nil
+
+	case <-ch[28]:
+		return nil
+
+	case <-ch[29]:
+		return nil
+
+	case <-ch[30]:
+		return nil
+
+	case <-ch[31]:
+		return nil
+
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}

+ 8 - 0
vendor/github.com/ishidawataru/sctp/sctp.go

@@ -505,6 +505,14 @@ func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) {
 	return info, err
 }

+func (c *SCTPConn) Getsockopt(optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+	return getsockopt(c.fd(), optname, optval, optlen)
+}
+
+func (c *SCTPConn) Setsockopt(optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+	return setsockopt(c.fd(), optname, optval, optlen)
+}
+
 func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) {
 	addr := &SCTPAddr{
 		IPAddrs: make([]net.IPAddr, n),

+ 0 - 0
vendor/github.com/docker/swarmkit/LICENSE → vendor/github.com/moby/swarmkit/v2/LICENSE


+ 3 - 3
vendor/github.com/docker/swarmkit/agent/agent.go → vendor/github.com/moby/swarmkit/v2/agent/agent.go

@@ -8,9 +8,9 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 

+ 3 - 3
vendor/github.com/docker/swarmkit/agent/config.go → vendor/github.com/moby/swarmkit/v2/agent/config.go

@@ -2,9 +2,9 @@ package agent
 
 
 import (
 import (
 	"github.com/docker/go-events"
 	"github.com/docker/go-events"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/connectionbroker"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/connectionbroker"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	bolt "go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials"

+ 2 - 2
vendor/github.com/docker/swarmkit/agent/configs/configs.go → vendor/github.com/moby/swarmkit/v2/agent/configs/configs.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"fmt"
 	"sync"
 	"sync"
 
 
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 // configs is a map that keeps all the currently available configs to the agent
 // configs is a map that keeps all the currently available configs to the agent

+ 1 - 1
vendor/github.com/docker/swarmkit/agent/csi/plugin/manager.go → vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/manager.go

@@ -7,7 +7,7 @@ import (
 
 
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
 
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 const (
 const (

+ 2 - 2
vendor/github.com/docker/swarmkit/agent/csi/plugin/plugin.go → vendor/github.com/moby/swarmkit/v2/agent/csi/plugin/plugin.go

@@ -12,8 +12,8 @@ import (
 
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
 )
 )
 
 
 // SecretGetter is a reimplementation of the exec.SecretGetter interface in the
 // SecretGetter is a reimplementation of the exec.SecretGetter interface in the

+ 5 - 5
vendor/github.com/docker/swarmkit/agent/csi/volumes.go → vendor/github.com/moby/swarmkit/v2/agent/csi/volumes.go

@@ -9,11 +9,11 @@ import (
 
 
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
 
 
-	"github.com/docker/swarmkit/agent/csi/plugin"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/volumequeue"
+	"github.com/moby/swarmkit/v2/agent/csi/plugin"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
+	"github.com/moby/swarmkit/v2/volumequeue"
 )
 )
 
 
 // volumeState keeps track of the state of a volume on this node.
 // volumeState keeps track of the state of a volume on this node.

+ 5 - 5
vendor/github.com/docker/swarmkit/agent/dependency.go → vendor/github.com/moby/swarmkit/v2/agent/dependency.go

@@ -3,11 +3,11 @@ package agent
 import (
 import (
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
 
 
-	"github.com/docker/swarmkit/agent/configs"
-	"github.com/docker/swarmkit/agent/csi"
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/agent/secrets"
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/agent/configs"
+	"github.com/moby/swarmkit/v2/agent/csi"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/agent/secrets"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 type dependencyManager struct {
 type dependencyManager struct {

+ 0 - 0
vendor/github.com/docker/swarmkit/agent/errors.go → vendor/github.com/moby/swarmkit/v2/agent/errors.go


+ 4 - 4
vendor/github.com/docker/swarmkit/agent/exec/controller.go → vendor/github.com/moby/swarmkit/v2/agent/exec/controller.go

@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"fmt"
 	"time"
 	"time"
 
 
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/equality"
-	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/protobuf/ptypes"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/equality"
+	"github.com/moby/swarmkit/v2/log"
+	"github.com/moby/swarmkit/v2/protobuf/ptypes"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 )
 )

+ 1 - 1
vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go → vendor/github.com/moby/swarmkit/v2/agent/exec/controller_stub.go

@@ -5,7 +5,7 @@ import (
 	"runtime"
 	"runtime"
 	"strings"
 	"strings"
 
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 // StubController implements the Controller interface,
 // StubController implements the Controller interface,

+ 0 - 0
vendor/github.com/docker/swarmkit/agent/exec/errors.go → vendor/github.com/moby/swarmkit/v2/agent/exec/errors.go


+ 1 - 1
vendor/github.com/docker/swarmkit/agent/exec/executor.go → vendor/github.com/moby/swarmkit/v2/agent/exec/executor.go

@@ -3,7 +3,7 @@ package exec
 import (
 import (
 	"context"
 	"context"
 
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 // Executor provides controllers for tasks.
 // Executor provides controllers for tasks.

+ 0 - 0
vendor/github.com/docker/swarmkit/agent/helpers.go → vendor/github.com/moby/swarmkit/v2/agent/helpers.go


+ 2 - 2
vendor/github.com/docker/swarmkit/agent/reporter.go → vendor/github.com/moby/swarmkit/v2/agent/reporter.go

@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"reflect"
 	"sync"
 	"sync"
 
 
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
 )
 )
 
 
 // StatusReporter receives updates to task status. Method may be called
 // StatusReporter receives updates to task status. Method may be called

+ 1 - 1
vendor/github.com/docker/swarmkit/agent/resource.go → vendor/github.com/moby/swarmkit/v2/agent/resource.go

@@ -3,7 +3,7 @@ package agent
 import (
 import (
 	"context"
 	"context"
 
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 )
 
 
 type resourceAllocator struct {
 type resourceAllocator struct {

+ 3 - 3
vendor/github.com/docker/swarmkit/agent/secrets/secrets.go → vendor/github.com/moby/swarmkit/v2/agent/secrets/secrets.go

@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"fmt"
 	"sync"
 	"sync"
 
 
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/identity"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/identity"
 )
 )
 
 
 // secrets is a map that keeps all the currently available secrets to the agent
 // secrets is a map that keeps all the currently available secrets to the agent

+ 3 - 3
vendor/github.com/docker/swarmkit/agent/session.go → vendor/github.com/moby/swarmkit/v2/agent/session.go

@@ -7,9 +7,9 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/connectionbroker"
-	"github.com/docker/swarmkit/log"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/connectionbroker"
+	"github.com/moby/swarmkit/v2/log"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"

+ 1 - 1
vendor/github.com/docker/swarmkit/agent/storage.go → vendor/github.com/moby/swarmkit/v2/agent/storage.go

@@ -1,7 +1,7 @@
 package agent
 package agent
 
 
 import (
 import (
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/proto"
 	bolt "go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
 )
 )

+ 4 - 4
vendor/github.com/docker/swarmkit/agent/task.go → vendor/github.com/moby/swarmkit/v2/agent/task.go

@@ -5,10 +5,10 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/equality"
-	"github.com/docker/swarmkit/log"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/equality"
+	"github.com/moby/swarmkit/v2/log"
 )
 )
 
 
 // taskManager manages all aspects of task execution and reporting for an agent
 // taskManager manages all aspects of task execution and reporting for an agent

+ 4 - 4
vendor/github.com/docker/swarmkit/agent/worker.go → vendor/github.com/moby/swarmkit/v2/agent/worker.go

@@ -4,10 +4,10 @@ import (
 	"context"
 	"context"
 	"sync"
 	"sync"
 
 
-	"github.com/docker/swarmkit/agent/exec"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/log"
-	"github.com/docker/swarmkit/watch"
+	"github.com/moby/swarmkit/v2/agent/exec"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/log"
+	"github.com/moby/swarmkit/v2/watch"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
 	bolt "go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
 )
 )

+ 0 - 0
vendor/github.com/docker/swarmkit/api/README.md → vendor/github.com/moby/swarmkit/v2/api/README.md


+ 0 - 0
vendor/github.com/docker/swarmkit/api/api.pb.txt → vendor/github.com/moby/swarmkit/v2/api/api.pb.txt


+ 6 - 6
vendor/github.com/docker/swarmkit/api/ca.pb.go → vendor/github.com/moby/swarmkit/v2/api/ca.pb.go

@@ -6,11 +6,11 @@ package api
 import (
 import (
 	context "context"
 	context "context"
 	fmt "fmt"
 	fmt "fmt"
-	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
-	raftselector "github.com/docker/swarmkit/manager/raftselector"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	proto "github.com/gogo/protobuf/proto"
+	github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
+	raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	grpc "google.golang.org/grpc"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"
 	metadata "google.golang.org/grpc/metadata"
@@ -469,11 +469,11 @@ func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Status != nil {
 	if o.Status != nil {
 		m.Status = &IssuanceStatus{}
 		m.Status = &IssuanceStatus{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Status, o.Status)
 	}
 	}
 	if o.Certificate != nil {
 	if o.Certificate != nil {
 		m.Certificate = &Certificate{}
 		m.Certificate = &Certificate{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Certificate, o.Certificate)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Certificate, o.Certificate)
 	}
 	}
 }
 }
 
 
@@ -567,7 +567,7 @@ func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) {
 		m.UnlockKey = make([]byte, len(o.UnlockKey))
 		m.UnlockKey = make([]byte, len(o.UnlockKey))
 		copy(m.UnlockKey, o.UnlockKey)
 		copy(m.UnlockKey, o.UnlockKey)
 	}
 	}
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Version, &o.Version)
 }
 }
 
 
 // Reference imports to suppress errors if they are not otherwise used.
 // Reference imports to suppress errors if they are not otherwise used.

+ 0 - 0
vendor/github.com/docker/swarmkit/api/ca.proto → vendor/github.com/moby/swarmkit/v2/api/ca.proto


+ 70 - 70
vendor/github.com/docker/swarmkit/api/control.pb.go → vendor/github.com/moby/swarmkit/v2/api/control.pb.go

@@ -6,13 +6,13 @@ package api
 import (
 import (
 	context "context"
 	context "context"
 	fmt "fmt"
 	fmt "fmt"
-	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
-	raftselector "github.com/docker/swarmkit/manager/raftselector"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
 	types "github.com/gogo/protobuf/types"
 	types "github.com/gogo/protobuf/types"
+	github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
+	raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	grpc "google.golang.org/grpc"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"
 	metadata "google.golang.org/grpc/metadata"
@@ -4436,7 +4436,7 @@ func (m *GetNodeResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Node != nil {
 	if o.Node != nil {
 		m.Node = &Node{}
 		m.Node = &Node{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
 	}
 	}
 }
 }
 
 
@@ -4455,7 +4455,7 @@ func (m *ListNodesRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListNodesRequest_Filters{}
 		m.Filters = &ListNodesRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -4530,7 +4530,7 @@ func (m *ListNodesResponse) CopyFrom(src interface{}) {
 		m.Nodes = make([]*Node, len(o.Nodes))
 		m.Nodes = make([]*Node, len(o.Nodes))
 		for i := range m.Nodes {
 		for i := range m.Nodes {
 			m.Nodes[i] = &Node{}
 			m.Nodes[i] = &Node{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i])
 		}
 		}
 	}
 	}
 
 
@@ -4551,11 +4551,11 @@ func (m *UpdateNodeRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.NodeVersion != nil {
 	if o.NodeVersion != nil {
 		m.NodeVersion = &Version{}
 		m.NodeVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &NodeSpec{}
 		m.Spec = &NodeSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -4574,7 +4574,7 @@ func (m *UpdateNodeResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Node != nil {
 	if o.Node != nil {
 		m.Node = &Node{}
 		m.Node = &Node{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
 	}
 	}
 }
 }
 
 
@@ -4633,7 +4633,7 @@ func (m *GetTaskResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Task != nil {
 	if o.Task != nil {
 		m.Task = &Task{}
 		m.Task = &Task{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Task, o.Task)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Task, o.Task)
 	}
 	}
 }
 }
 
 
@@ -4677,7 +4677,7 @@ func (m *ListTasksRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListTasksRequest_Filters{}
 		m.Filters = &ListTasksRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -4755,7 +4755,7 @@ func (m *ListTasksResponse) CopyFrom(src interface{}) {
 		m.Tasks = make([]*Task, len(o.Tasks))
 		m.Tasks = make([]*Task, len(o.Tasks))
 		for i := range m.Tasks {
 		for i := range m.Tasks {
 			m.Tasks[i] = &Task{}
 			m.Tasks[i] = &Task{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
 		}
 		}
 	}
 	}
 
 
@@ -4776,7 +4776,7 @@ func (m *CreateServiceRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &ServiceSpec{}
 		m.Spec = &ServiceSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -4795,7 +4795,7 @@ func (m *CreateServiceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Service != nil {
 	if o.Service != nil {
 		m.Service = &Service{}
 		m.Service = &Service{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
 	}
 	}
 }
 }
 
 
@@ -4829,7 +4829,7 @@ func (m *GetServiceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Service != nil {
 	if o.Service != nil {
 		m.Service = &Service{}
 		m.Service = &Service{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
 	}
 	}
 }
 }
 
 
@@ -4848,11 +4848,11 @@ func (m *UpdateServiceRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.ServiceVersion != nil {
 	if o.ServiceVersion != nil {
 		m.ServiceVersion = &Version{}
 		m.ServiceVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &ServiceSpec{}
 		m.Spec = &ServiceSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -4871,7 +4871,7 @@ func (m *UpdateServiceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Service != nil {
 	if o.Service != nil {
 		m.Service = &Service{}
 		m.Service = &Service{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
 	}
 	}
 }
 }
 
 
@@ -4915,7 +4915,7 @@ func (m *ListServicesRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListServicesRequest_Filters{}
 		m.Filters = &ListServicesRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -4978,7 +4978,7 @@ func (m *ListServicesResponse) CopyFrom(src interface{}) {
 		m.Services = make([]*Service, len(o.Services))
 		m.Services = make([]*Service, len(o.Services))
 		for i := range m.Services {
 		for i := range m.Services {
 			m.Services[i] = &Service{}
 			m.Services[i] = &Service{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Services[i], o.Services[i])
 		}
 		}
 	}
 	}
 
 
@@ -5021,7 +5021,7 @@ func (m *ListServiceStatusesResponse) CopyFrom(src interface{}) {
 		m.Statuses = make([]*ListServiceStatusesResponse_ServiceStatus, len(o.Statuses))
 		m.Statuses = make([]*ListServiceStatusesResponse_ServiceStatus, len(o.Statuses))
 		for i := range m.Statuses {
 		for i := range m.Statuses {
 			m.Statuses[i] = &ListServiceStatusesResponse_ServiceStatus{}
 			m.Statuses[i] = &ListServiceStatusesResponse_ServiceStatus{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Statuses[i], o.Statuses[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Statuses[i], o.Statuses[i])
 		}
 		}
 	}
 	}
 
 
@@ -5057,7 +5057,7 @@ func (m *CreateNetworkRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &NetworkSpec{}
 		m.Spec = &NetworkSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -5076,7 +5076,7 @@ func (m *CreateNetworkResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Network != nil {
 	if o.Network != nil {
 		m.Network = &Network{}
 		m.Network = &Network{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Network, o.Network)
 	}
 	}
 }
 }
 
 
@@ -5110,7 +5110,7 @@ func (m *GetNetworkResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Network != nil {
 	if o.Network != nil {
 		m.Network = &Network{}
 		m.Network = &Network{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Network, o.Network)
 	}
 	}
 }
 }
 
 
@@ -5154,7 +5154,7 @@ func (m *ListNetworksRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListNetworksRequest_Filters{}
 		m.Filters = &ListNetworksRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -5212,7 +5212,7 @@ func (m *ListNetworksResponse) CopyFrom(src interface{}) {
 		m.Networks = make([]*Network, len(o.Networks))
 		m.Networks = make([]*Network, len(o.Networks))
 		for i := range m.Networks {
 		for i := range m.Networks {
 			m.Networks[i] = &Network{}
 			m.Networks[i] = &Network{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
 		}
 		}
 	}
 	}
 
 
@@ -5248,7 +5248,7 @@ func (m *GetClusterResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Cluster != nil {
 	if o.Cluster != nil {
 		m.Cluster = &Cluster{}
 		m.Cluster = &Cluster{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Cluster, o.Cluster)
 	}
 	}
 }
 }
 
 
@@ -5267,7 +5267,7 @@ func (m *ListClustersRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListClustersRequest_Filters{}
 		m.Filters = &ListClustersRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -5325,7 +5325,7 @@ func (m *ListClustersResponse) CopyFrom(src interface{}) {
 		m.Clusters = make([]*Cluster, len(o.Clusters))
 		m.Clusters = make([]*Cluster, len(o.Clusters))
 		for i := range m.Clusters {
 		for i := range m.Clusters {
 			m.Clusters[i] = &Cluster{}
 			m.Clusters[i] = &Cluster{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i])
 		}
 		}
 	}
 	}
 
 
@@ -5361,13 +5361,13 @@ func (m *UpdateClusterRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.ClusterVersion != nil {
 	if o.ClusterVersion != nil {
 		m.ClusterVersion = &Version{}
 		m.ClusterVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &ClusterSpec{}
 		m.Spec = &ClusterSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Rotation, &o.Rotation)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Rotation, &o.Rotation)
 }
 }
 
 
 func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse {
 func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse {
@@ -5385,7 +5385,7 @@ func (m *UpdateClusterResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Cluster != nil {
 	if o.Cluster != nil {
 		m.Cluster = &Cluster{}
 		m.Cluster = &Cluster{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Cluster, o.Cluster)
 	}
 	}
 }
 }
 
 
@@ -5419,7 +5419,7 @@ func (m *GetSecretResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Secret != nil {
 	if o.Secret != nil {
 		m.Secret = &Secret{}
 		m.Secret = &Secret{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
 	}
 	}
 }
 }
 
 
@@ -5438,11 +5438,11 @@ func (m *UpdateSecretRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.SecretVersion != nil {
 	if o.SecretVersion != nil {
 		m.SecretVersion = &Version{}
 		m.SecretVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &SecretSpec{}
 		m.Spec = &SecretSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -5461,7 +5461,7 @@ func (m *UpdateSecretResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Secret != nil {
 	if o.Secret != nil {
 		m.Secret = &Secret{}
 		m.Secret = &Secret{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
 	}
 	}
 }
 }
 
 
@@ -5480,7 +5480,7 @@ func (m *ListSecretsRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListSecretsRequest_Filters{}
 		m.Filters = &ListSecretsRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -5538,7 +5538,7 @@ func (m *ListSecretsResponse) CopyFrom(src interface{}) {
 		m.Secrets = make([]*Secret, len(o.Secrets))
 		m.Secrets = make([]*Secret, len(o.Secrets))
 		for i := range m.Secrets {
 		for i := range m.Secrets {
 			m.Secrets[i] = &Secret{}
 			m.Secrets[i] = &Secret{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i])
 		}
 		}
 	}
 	}
 
 
@@ -5559,7 +5559,7 @@ func (m *CreateSecretRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &SecretSpec{}
 		m.Spec = &SecretSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -5578,7 +5578,7 @@ func (m *CreateSecretResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Secret != nil {
 	if o.Secret != nil {
 		m.Secret = &Secret{}
 		m.Secret = &Secret{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
 	}
 	}
 }
 }
 
 
@@ -5637,7 +5637,7 @@ func (m *GetConfigResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Config != nil {
 	if o.Config != nil {
 		m.Config = &Config{}
 		m.Config = &Config{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
 	}
 	}
 }
 }
 
 
@@ -5656,11 +5656,11 @@ func (m *UpdateConfigRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.ConfigVersion != nil {
 	if o.ConfigVersion != nil {
 		m.ConfigVersion = &Version{}
 		m.ConfigVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &ConfigSpec{}
 		m.Spec = &ConfigSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -5679,7 +5679,7 @@ func (m *UpdateConfigResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Config != nil {
 	if o.Config != nil {
 		m.Config = &Config{}
 		m.Config = &Config{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
 	}
 	}
 }
 }
 
 
@@ -5698,7 +5698,7 @@ func (m *ListConfigsRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListConfigsRequest_Filters{}
 		m.Filters = &ListConfigsRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -5756,7 +5756,7 @@ func (m *ListConfigsResponse) CopyFrom(src interface{}) {
 		m.Configs = make([]*Config, len(o.Configs))
 		m.Configs = make([]*Config, len(o.Configs))
 		for i := range m.Configs {
 		for i := range m.Configs {
 			m.Configs[i] = &Config{}
 			m.Configs[i] = &Config{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Configs[i], o.Configs[i])
 		}
 		}
 	}
 	}
 
 
@@ -5777,7 +5777,7 @@ func (m *CreateConfigRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &ConfigSpec{}
 		m.Spec = &ConfigSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -5796,7 +5796,7 @@ func (m *CreateConfigResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Config != nil {
 	if o.Config != nil {
 		m.Config = &Config{}
 		m.Config = &Config{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
 	}
 	}
 }
 }
 
 
@@ -5840,7 +5840,7 @@ func (m *CreateExtensionRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Annotations != nil {
 	if o.Annotations != nil {
 		m.Annotations = &Annotations{}
 		m.Annotations = &Annotations{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
 	}
 	}
 }
 }
 
 
@@ -5859,7 +5859,7 @@ func (m *CreateExtensionResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Extension != nil {
 	if o.Extension != nil {
 		m.Extension = &Extension{}
 		m.Extension = &Extension{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Extension, o.Extension)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Extension, o.Extension)
 	}
 	}
 }
 }
 
 
@@ -5918,7 +5918,7 @@ func (m *GetExtensionResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Extension != nil {
 	if o.Extension != nil {
 		m.Extension = &Extension{}
 		m.Extension = &Extension{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Extension, o.Extension)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Extension, o.Extension)
 	}
 	}
 }
 }
 
 
@@ -5937,11 +5937,11 @@ func (m *CreateResourceRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Annotations != nil {
 	if o.Annotations != nil {
 		m.Annotations = &Annotations{}
 		m.Annotations = &Annotations{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
 	}
 	}
 	if o.Payload != nil {
 	if o.Payload != nil {
 		m.Payload = &types.Any{}
 		m.Payload = &types.Any{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Payload, o.Payload)
 	}
 	}
 }
 }
 
 
@@ -5960,7 +5960,7 @@ func (m *CreateResourceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Resource != nil {
 	if o.Resource != nil {
 		m.Resource = &Resource{}
 		m.Resource = &Resource{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
 	}
 	}
 }
 }
 
 
@@ -6004,15 +6004,15 @@ func (m *UpdateResourceRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.ResourceVersion != nil {
 	if o.ResourceVersion != nil {
 		m.ResourceVersion = &Version{}
 		m.ResourceVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.ResourceVersion, o.ResourceVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ResourceVersion, o.ResourceVersion)
 	}
 	}
 	if o.Annotations != nil {
 	if o.Annotations != nil {
 		m.Annotations = &Annotations{}
 		m.Annotations = &Annotations{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
 	}
 	}
 	if o.Payload != nil {
 	if o.Payload != nil {
 		m.Payload = &types.Any{}
 		m.Payload = &types.Any{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Payload, o.Payload)
 	}
 	}
 }
 }
 
 
@@ -6031,7 +6031,7 @@ func (m *UpdateResourceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Resource != nil {
 	if o.Resource != nil {
 		m.Resource = &Resource{}
 		m.Resource = &Resource{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
 	}
 	}
 }
 }
 
 
@@ -6065,7 +6065,7 @@ func (m *GetResourceResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Resource != nil {
 	if o.Resource != nil {
 		m.Resource = &Resource{}
 		m.Resource = &Resource{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
 	}
 	}
 }
 }
 
 
@@ -6084,7 +6084,7 @@ func (m *ListResourcesRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListResourcesRequest_Filters{}
 		m.Filters = &ListResourcesRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -6142,7 +6142,7 @@ func (m *ListResourcesResponse) CopyFrom(src interface{}) {
 		m.Resources = make([]*Resource, len(o.Resources))
 		m.Resources = make([]*Resource, len(o.Resources))
 		for i := range m.Resources {
 		for i := range m.Resources {
 			m.Resources[i] = &Resource{}
 			m.Resources[i] = &Resource{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources[i], o.Resources[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resources[i], o.Resources[i])
 		}
 		}
 	}
 	}
 
 
@@ -6163,7 +6163,7 @@ func (m *CreateVolumeRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &VolumeSpec{}
 		m.Spec = &VolumeSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -6182,7 +6182,7 @@ func (m *CreateVolumeResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Volume != nil {
 	if o.Volume != nil {
 		m.Volume = &Volume{}
 		m.Volume = &Volume{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
 	}
 	}
 }
 }
 
 
@@ -6216,7 +6216,7 @@ func (m *GetVolumeResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Volume != nil {
 	if o.Volume != nil {
 		m.Volume = &Volume{}
 		m.Volume = &Volume{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
 	}
 	}
 }
 }
 
 
@@ -6235,11 +6235,11 @@ func (m *UpdateVolumeRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.VolumeVersion != nil {
 	if o.VolumeVersion != nil {
 		m.VolumeVersion = &Version{}
 		m.VolumeVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.VolumeVersion, o.VolumeVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.VolumeVersion, o.VolumeVersion)
 	}
 	}
 	if o.Spec != nil {
 	if o.Spec != nil {
 		m.Spec = &VolumeSpec{}
 		m.Spec = &VolumeSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	}
 }
 }
 
 
@@ -6258,7 +6258,7 @@ func (m *UpdateVolumeResponse) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Volume != nil {
 	if o.Volume != nil {
 		m.Volume = &Volume{}
 		m.Volume = &Volume{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
 	}
 	}
 }
 }
 
 
@@ -6277,7 +6277,7 @@ func (m *ListVolumesRequest) CopyFrom(src interface{}) {
 	*m = *o
 	*m = *o
 	if o.Filters != nil {
 	if o.Filters != nil {
 		m.Filters = &ListVolumesRequest_Filters{}
 		m.Filters = &ListVolumesRequest_Filters{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
 	}
 	}
 }
 }
 
 
@@ -6345,7 +6345,7 @@ func (m *ListVolumesResponse) CopyFrom(src interface{}) {
 		m.Volumes = make([]*Volume, len(o.Volumes))
 		m.Volumes = make([]*Volume, len(o.Volumes))
 		for i := range m.Volumes {
 		for i := range m.Volumes {
 			m.Volumes[i] = &Volume{}
 			m.Volumes[i] = &Volume{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
 		}
 		}
 	}
 	}
 
 

+ 0 - 0
vendor/github.com/docker/swarmkit/api/control.proto → vendor/github.com/moby/swarmkit/v2/api/control.proto


+ 0 - 0
vendor/github.com/docker/swarmkit/api/deepcopy/copy.go → vendor/github.com/moby/swarmkit/v2/api/deepcopy/copy.go


+ 2 - 2
vendor/github.com/docker/swarmkit/api/defaults/service.go → vendor/github.com/moby/swarmkit/v2/api/defaults/service.go

@@ -3,8 +3,8 @@ package defaults
 import (
 import (
 	"time"
 	"time"
 
 
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/deepcopy"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/deepcopy"
 	gogotypes "github.com/gogo/protobuf/types"
 	gogotypes "github.com/gogo/protobuf/types"
 )
 )
 
 

+ 18 - 18
vendor/github.com/docker/swarmkit/api/dispatcher.pb.go → vendor/github.com/moby/swarmkit/v2/api/dispatcher.pb.go

@@ -6,13 +6,13 @@ package api
 import (
 	context "context"
 	fmt "fmt"
-	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
-	raftselector "github.com/docker/swarmkit/manager/raftselector"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	_ "github.com/gogo/protobuf/types"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
+	raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"
@@ -1013,7 +1013,7 @@ func (m *SessionRequest) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Description != nil {
 		m.Description = &NodeDescription{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Description, o.Description)
 	}
 }
 
@@ -1032,13 +1032,13 @@ func (m *SessionMessage) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Node != nil {
 		m.Node = &Node{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
 	}
 	if o.Managers != nil {
 		m.Managers = make([]*WeightedPeer, len(o.Managers))
 		for i := range m.Managers {
 			m.Managers[i] = &WeightedPeer{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Managers[i], o.Managers[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Managers[i], o.Managers[i])
 		}
 	}
 
@@ -1046,7 +1046,7 @@ func (m *SessionMessage) CopyFrom(src interface{}) {
 		m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys))
 		for i := range m.NetworkBootstrapKeys {
 			m.NetworkBootstrapKeys[i] = &EncryptionKey{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i])
 		}
 	}
 
@@ -1084,7 +1084,7 @@ func (m *HeartbeatResponse) CopyFrom(src interface{}) {
 
 	o := src.(*HeartbeatResponse)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Period, &o.Period)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Period, &o.Period)
 }
 
 func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest {
@@ -1104,7 +1104,7 @@ func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) {
 		m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates))
 		for i := range m.Updates {
 			m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
 		}
 	}
 
@@ -1125,7 +1125,7 @@ func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Status != nil {
 		m.Status = &TaskStatus{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Status, o.Status)
 	}
 }
 
@@ -1156,7 +1156,7 @@ func (m *UpdateVolumeStatusRequest) CopyFrom(src interface{}) {
 		m.Updates = make([]*UpdateVolumeStatusRequest_VolumeStatusUpdate, len(o.Updates))
 		for i := range m.Updates {
 			m.Updates[i] = &UpdateVolumeStatusRequest_VolumeStatusUpdate{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
 		}
 	}
 
@@ -1219,7 +1219,7 @@ func (m *TasksMessage) CopyFrom(src interface{}) {
 		m.Tasks = make([]*Task, len(o.Tasks))
 		for i := range m.Tasks {
 			m.Tasks[i] = &Task{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
 		}
 	}
 
@@ -1259,25 +1259,25 @@ func (m *Assignment) CopyFrom(src interface{}) {
 			v := Assignment_Task{
 				Task: &Task{},
 			}
-			github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask())
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Task, o.GetTask())
 			m.Item = &v
 		case *Assignment_Secret:
 			v := Assignment_Secret{
 				Secret: &Secret{},
 			}
-			github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret())
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Secret, o.GetSecret())
 			m.Item = &v
 		case *Assignment_Config:
 			v := Assignment_Config{
 				Config: &Config{},
 			}
-			github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Config, o.GetConfig())
 			m.Item = &v
 		case *Assignment_Volume:
 			v := Assignment_Volume{
 				Volume: &VolumeAssignment{},
 			}
-			github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Volume, o.GetVolume())
 			m.Item = &v
 		}
 	}
@@ -1299,7 +1299,7 @@ func (m *AssignmentChange) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Assignment != nil {
 		m.Assignment = &Assignment{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Assignment, o.Assignment)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Assignment, o.Assignment)
 	}
 }
 
@@ -1320,7 +1320,7 @@ func (m *AssignmentsMessage) CopyFrom(src interface{}) {
 		m.Changes = make([]*AssignmentChange, len(o.Changes))
 		for i := range m.Changes {
 			m.Changes[i] = &AssignmentChange{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Changes[i], o.Changes[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Changes[i], o.Changes[i])
 		}
 	}
 

+ 0 - 0
vendor/github.com/docker/swarmkit/api/dispatcher.proto → vendor/github.com/moby/swarmkit/v2/api/dispatcher.proto


+ 1 - 1
vendor/github.com/docker/swarmkit/api/equality/equality.go → vendor/github.com/moby/swarmkit/v2/api/equality/equality.go

@@ -4,7 +4,7 @@ import (
 	"crypto/subtle"
 	"reflect"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // TasksEqualStable returns true if the tasks are functionally equal, ignoring status,

+ 1 - 1
vendor/github.com/docker/swarmkit/api/genericresource/helpers.go → vendor/github.com/moby/swarmkit/v2/api/genericresource/helpers.go

@@ -1,7 +1,7 @@
 package genericresource
 
 import (
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // NewSet creates a set object

+ 1 - 1
vendor/github.com/docker/swarmkit/api/genericresource/parse.go → vendor/github.com/moby/swarmkit/v2/api/genericresource/parse.go

@@ -6,7 +6,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 func newParseError(format string, args ...interface{}) error {

+ 1 - 1
vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go → vendor/github.com/moby/swarmkit/v2/api/genericresource/resource_management.go

@@ -3,7 +3,7 @@ package genericresource
 import (
 	"fmt"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // Claim assigns GenericResources to a task by taking them from the

+ 1 - 1
vendor/github.com/docker/swarmkit/api/genericresource/string.go → vendor/github.com/moby/swarmkit/v2/api/genericresource/string.go

@@ -4,7 +4,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string {

+ 1 - 1
vendor/github.com/docker/swarmkit/api/genericresource/validate.go → vendor/github.com/moby/swarmkit/v2/api/genericresource/validate.go

@@ -3,7 +3,7 @@ package genericresource
 import (
 	"fmt"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 // ValidateTask validates that the task only uses integers

+ 2 - 2
vendor/github.com/docker/swarmkit/api/health.pb.go → vendor/github.com/moby/swarmkit/v2/api/health.pb.go

@@ -6,10 +6,10 @@ package api
 import (
 	context "context"
 	fmt "fmt"
-	raftselector "github.com/docker/swarmkit/manager/raftselector"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
+	raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"

+ 0 - 0
vendor/github.com/docker/swarmkit/api/health.proto → vendor/github.com/moby/swarmkit/v2/api/health.proto


+ 13 - 13
vendor/github.com/docker/swarmkit/api/logbroker.pb.go → vendor/github.com/moby/swarmkit/v2/api/logbroker.pb.go

@@ -6,12 +6,12 @@ package api
 import (
 	context "context"
 	fmt "fmt"
-	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
-	raftselector "github.com/docker/swarmkit/manager/raftselector"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	types "github.com/gogo/protobuf/types"
+	github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
+	raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"
@@ -686,7 +686,7 @@ func (m *LogSubscriptionOptions) CopyFrom(src interface{}) {
 
 	if o.Since != nil {
 		m.Since = &types.Timestamp{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Since, o.Since)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Since, o.Since)
 	}
 }
 
@@ -763,10 +763,10 @@ func (m *LogMessage) CopyFrom(src interface{}) {
 
 	o := src.(*LogMessage)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Context, &o.Context)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Context, &o.Context)
 	if o.Timestamp != nil {
 		m.Timestamp = &types.Timestamp{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Timestamp, o.Timestamp)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Timestamp, o.Timestamp)
 	}
 	if o.Data != nil {
 		m.Data = make([]byte, len(o.Data))
@@ -775,7 +775,7 @@ func (m *LogMessage) CopyFrom(src interface{}) {
 	if o.Attrs != nil {
 		m.Attrs = make([]LogAttr, len(o.Attrs))
 		for i := range m.Attrs {
-			github_com_docker_swarmkit_api_deepcopy.Copy(&m.Attrs[i], &o.Attrs[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Attrs[i], &o.Attrs[i])
 		}
 	}
 
@@ -796,11 +796,11 @@ func (m *SubscribeLogsRequest) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Selector != nil {
 		m.Selector = &LogSelector{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Selector, o.Selector)
 	}
 	if o.Options != nil {
 		m.Options = &LogSubscriptionOptions{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Options, o.Options)
 	}
 }
 
@@ -820,7 +820,7 @@ func (m *SubscribeLogsMessage) CopyFrom(src interface{}) {
 	if o.Messages != nil {
 		m.Messages = make([]LogMessage, len(o.Messages))
 		for i := range m.Messages {
-			github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i])
 		}
 	}
 
@@ -851,11 +851,11 @@ func (m *SubscriptionMessage) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Selector != nil {
 		m.Selector = &LogSelector{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Selector, o.Selector)
 	}
 	if o.Options != nil {
 		m.Options = &LogSubscriptionOptions{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Options, o.Options)
 	}
 }
 
@@ -875,7 +875,7 @@ func (m *PublishLogsMessage) CopyFrom(src interface{}) {
 	if o.Messages != nil {
 		m.Messages = make([]LogMessage, len(o.Messages))
 		for i := range m.Messages {
-			github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i])
 		}
 	}
 

+ 0 - 0
vendor/github.com/docker/swarmkit/api/logbroker.proto → vendor/github.com/moby/swarmkit/v2/api/logbroker.proto


+ 1 - 1
vendor/github.com/docker/swarmkit/api/naming/naming.go → vendor/github.com/moby/swarmkit/v2/api/naming/naming.go

@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/docker/swarmkit/api"
+	"github.com/moby/swarmkit/v2/api"
 )
 
 var (

+ 60 - 60
vendor/github.com/docker/swarmkit/api/objects.pb.go → vendor/github.com/moby/swarmkit/v2/api/objects.pb.go

@@ -6,12 +6,12 @@ package api
 import (
 	fmt "fmt"
 	github_com_docker_go_events "github.com/docker/go-events"
-	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
-	_ "github.com/docker/swarmkit/protobuf/plugin"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
 	types "github.com/gogo/protobuf/types"
+	github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
+	_ "github.com/moby/swarmkit/v2/protobuf/plugin"
 	io "io"
 	math "math"
 	math_bits "math/bits"
@@ -963,14 +963,14 @@ func (m *Meta) CopyFrom(src interface{}) {
 
 	o := src.(*Meta)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Version, &o.Version)
 	if o.CreatedAt != nil {
 		m.CreatedAt = &types.Timestamp{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.CreatedAt, o.CreatedAt)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.CreatedAt, o.CreatedAt)
 	}
 	if o.UpdatedAt != nil {
 		m.UpdatedAt = &types.Timestamp{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdatedAt, o.UpdatedAt)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.UpdatedAt, o.UpdatedAt)
 	}
 }
 
@@ -987,27 +987,27 @@ func (m *Node) CopyFrom(src interface{}) {
 
 	o := src.(*Node)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 	if o.Description != nil {
 		m.Description = &NodeDescription{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Description, o.Description)
 	}
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Status, &o.Status)
 	if o.ManagerStatus != nil {
 		m.ManagerStatus = &ManagerStatus{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.ManagerStatus, o.ManagerStatus)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ManagerStatus, o.ManagerStatus)
 	}
 	if o.Attachment != nil {
 		m.Attachment = &NetworkAttachment{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachment, o.Attachment)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Attachment, o.Attachment)
 	}
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Certificate, &o.Certificate)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Certificate, &o.Certificate)
 	if o.Attachments != nil {
 		m.Attachments = make([]*NetworkAttachment, len(o.Attachments))
 		for i := range m.Attachments {
 			m.Attachments[i] = &NetworkAttachment{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachments[i], o.Attachments[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Attachments[i], o.Attachments[i])
 		}
 	}
 
@@ -1026,31 +1026,31 @@ func (m *Service) CopyFrom(src interface{}) {
 
 	o := src.(*Service)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 	if o.SpecVersion != nil {
 		m.SpecVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion)
 	}
 	if o.PreviousSpec != nil {
 		m.PreviousSpec = &ServiceSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpec, o.PreviousSpec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.PreviousSpec, o.PreviousSpec)
 	}
 	if o.PreviousSpecVersion != nil {
 		m.PreviousSpecVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion)
 	}
 	if o.Endpoint != nil {
 		m.Endpoint = &Endpoint{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Endpoint, o.Endpoint)
 	}
 	if o.UpdateStatus != nil {
 		m.UpdateStatus = &UpdateStatus{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdateStatus, o.UpdateStatus)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.UpdateStatus, o.UpdateStatus)
 	}
 	if o.JobStatus != nil {
 		m.JobStatus = &JobStatus{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.JobStatus, o.JobStatus)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.JobStatus, o.JobStatus)
 	}
 }
 
@@ -1069,13 +1069,13 @@ func (m *Endpoint) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Spec != nil {
 		m.Spec = &EndpointSpec{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
 	}
 	if o.Ports != nil {
 		m.Ports = make([]*PortConfig, len(o.Ports))
 		for i := range m.Ports {
 			m.Ports[i] = &PortConfig{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Ports[i], o.Ports[i])
 		}
 	}
 
@@ -1083,7 +1083,7 @@ func (m *Endpoint) CopyFrom(src interface{}) {
 		m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs))
 		for i := range m.VirtualIPs {
 			m.VirtualIPs[i] = &Endpoint_VirtualIP{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i])
 		}
 	}
 
@@ -1117,48 +1117,48 @@ func (m *Task) CopyFrom(src interface{}) {
 
 	o := src.(*Task)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 	if o.SpecVersion != nil {
 		m.SpecVersion = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion)
 	}
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Status, &o.Status)
 	if o.Networks != nil {
 		m.Networks = make([]*NetworkAttachment, len(o.Networks))
 		for i := range m.Networks {
 			m.Networks[i] = &NetworkAttachment{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
 		}
 	}
 
 	if o.Endpoint != nil {
 		m.Endpoint = &Endpoint{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Endpoint, o.Endpoint)
 	}
 	if o.LogDriver != nil {
 		m.LogDriver = &Driver{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.LogDriver, o.LogDriver)
 	}
 	if o.AssignedGenericResources != nil {
 		m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources))
 		for i := range m.AssignedGenericResources {
 			m.AssignedGenericResources[i] = &GenericResource{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i])
 		}
 	}
 
 	if o.JobIteration != nil {
 		m.JobIteration = &Version{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.JobIteration, o.JobIteration)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.JobIteration, o.JobIteration)
 	}
 	if o.Volumes != nil {
 		m.Volumes = make([]*VolumeAttachment, len(o.Volumes))
 		for i := range m.Volumes {
 			m.Volumes[i] = &VolumeAttachment{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
 		}
 	}
 
@@ -1179,7 +1179,7 @@ func (m *NetworkAttachment) CopyFrom(src interface{}) {
 	*m = *o
 	if o.Network != nil {
 		m.Network = &Network{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Network, o.Network)
 	}
 	if o.Addresses != nil {
 		m.Addresses = make([]string, len(o.Addresses))
@@ -1213,15 +1213,15 @@ func (m *Network) CopyFrom(src interface{}) {
 
 	o := src.(*Network)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 	if o.DriverState != nil {
 		m.DriverState = &Driver{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverState, o.DriverState)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.DriverState, o.DriverState)
 	}
 	if o.IPAM != nil {
 		m.IPAM = &IPAMOptions{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.IPAM, o.IPAM)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.IPAM, o.IPAM)
 	}
 }
 
@@ -1238,14 +1238,14 @@ func (m *Cluster) CopyFrom(src interface{}) {
 
 	o := src.(*Cluster)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.RootCA, &o.RootCA)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.RootCA, &o.RootCA)
 	if o.NetworkBootstrapKeys != nil {
 		m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys))
 		for i := range m.NetworkBootstrapKeys {
 			m.NetworkBootstrapKeys[i] = &EncryptionKey{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i])
 		}
 	}
 
@@ -1253,7 +1253,7 @@ func (m *Cluster) CopyFrom(src interface{}) {
 		m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates))
 		for k, v := range o.BlacklistedCertificates {
 			m.BlacklistedCertificates[k] = &BlacklistedCertificate{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.BlacklistedCertificates[k], v)
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.BlacklistedCertificates[k], v)
 		}
 	}
 
@@ -1261,7 +1261,7 @@ func (m *Cluster) CopyFrom(src interface{}) {
 		m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys))
 		for i := range m.UnlockKeys {
 			m.UnlockKeys[i] = &EncryptionKey{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i])
 		}
 	}
 
@@ -1285,8 +1285,8 @@ func (m *Secret) CopyFrom(src interface{}) {
 
 	o := src.(*Secret)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 }
 
 func (m *Config) Copy() *Config {
@@ -1302,8 +1302,8 @@ func (m *Config) CopyFrom(src interface{}) {
 
 	o := src.(*Config)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 }
 
 func (m *Resource) Copy() *Resource {
@@ -1319,11 +1319,11 @@ func (m *Resource) CopyFrom(src interface{}) {
 
 	o := src.(*Resource)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
 	if o.Payload != nil {
 		m.Payload = &types.Any{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Payload, o.Payload)
 	}
 }
 
@@ -1340,8 +1340,8 @@ func (m *Extension) CopyFrom(src interface{}) {
 
 	o := src.(*Extension)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
 }
 
 func (m *Volume) Copy() *Volume {
@@ -1357,19 +1357,19 @@ func (m *Volume) CopyFrom(src interface{}) {
 
 	o := src.(*Volume)
 	*m = *o
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta)
-	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Meta, &o.Meta)
+	github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Spec, &o.Spec)
 	if o.PublishStatus != nil {
 		m.PublishStatus = make([]*VolumePublishStatus, len(o.PublishStatus))
 		for i := range m.PublishStatus {
 			m.PublishStatus[i] = &VolumePublishStatus{}
-			github_com_docker_swarmkit_api_deepcopy.Copy(m.PublishStatus[i], o.PublishStatus[i])
+			github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.PublishStatus[i], o.PublishStatus[i])
 		}
 	}
 
 	if o.VolumeInfo != nil {
 		m.VolumeInfo = &VolumeInfo{}
-		github_com_docker_swarmkit_api_deepcopy.Copy(m.VolumeInfo, o.VolumeInfo)
+		github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.VolumeInfo, o.VolumeInfo)
 	}
 }
 

+ 0 - 0
vendor/github.com/docker/swarmkit/api/objects.proto → vendor/github.com/moby/swarmkit/v2/api/objects.proto


Too many files were changed in this diff, so some files are not shown.
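For reference, every hunk above applies the same Go semantic-import-versioning rename: the vendored package contents are unchanged, only the module path moves from github.com/docker/swarmkit to github.com/moby/swarmkit/v2. A minimal sketch of what this means for an importer follows; the consumer program itself is hypothetical and not part of this diff, but the import paths and the deepcopy.Copy helper are the ones shown above.

// Hypothetical consumer, for illustration only: same packages and APIs,
// new /v2 module path.
package main

import (
	"fmt"

	"github.com/moby/swarmkit/v2/api"          // was github.com/docker/swarmkit/api
	"github.com/moby/swarmkit/v2/api/deepcopy" // was github.com/docker/swarmkit/api/deepcopy
)

func main() {
	src := &api.Annotations{Name: "web"}
	dst := &api.Annotations{}
	deepcopy.Copy(dst, src) // the same deep-copy helper used throughout the generated code above
	fmt.Println(dst.Name)
}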