Browse source

Merge pull request #44079 from thaJeztah/c8d_build

containerd integration: Make build work
Sebastiaan van Stijn 2 years ago
parent
commit
b3428bcf88
100 changed files with 13899 additions and 61 deletions
  1. builder/builder-next/builder.go (+31 -15)
  2. builder/builder-next/controller.go (+123 -17)
  3. builder/builder-next/exporter/exporter.go (+3 -0)
  4. builder/builder-next/exporter/mobyexporter/export.go (+1 -1)
  5. builder/builder-next/exporter/mobyexporter/writer.go (+1 -1)
  6. builder/builder-next/exporter/overrides/overrides.go (+34 -0)
  7. builder/builder-next/exporter/overrides/wrapper.go (+33 -0)
  8. builder/builder-next/worker/containerdworker.go (+40 -0)
  9. builder/builder-next/worker/worker.go (+2 -1)
  10. cmd/dockerd/daemon.go (+31 -25)
  11. vendor.mod (+6 -1)
  12. vendor.sum (+345 -0)
  13. vendor/github.com/Microsoft/hcsshim/hcn/hcn.go (+328 -0)
  14. vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go (+388 -0)
  15. vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go (+164 -0)
  16. vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go (+138 -0)
  17. vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go (+311 -0)
  18. vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go (+446 -0)
  19. vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go (+462 -0)
  20. vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go (+344 -0)
  21. vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go (+266 -0)
  22. vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go (+147 -0)
  23. vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go (+795 -0)
  24. vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go (+110 -0)
  25. vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go (+288 -0)
  26. vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go (+51 -0)
  27. vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go (+71 -0)
  28. vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go (+16 -0)
  29. vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go (+43 -0)
  30. vendor/github.com/containerd/go-cni/.gitignore (+3 -0)
  31. vendor/github.com/containerd/go-cni/.golangci.yml (+23 -0)
  32. vendor/github.com/containerd/go-cni/LICENSE (+201 -0)
  33. vendor/github.com/containerd/go-cni/Makefile (+41 -0)
  34. vendor/github.com/containerd/go-cni/README.md (+96 -0)
  35. vendor/github.com/containerd/go-cni/cni.go (+312 -0)
  36. vendor/github.com/containerd/go-cni/deprecated.go (+34 -0)
  37. vendor/github.com/containerd/go-cni/errors.go (+55 -0)
  38. vendor/github.com/containerd/go-cni/helper.go (+41 -0)
  39. vendor/github.com/containerd/go-cni/namespace.go (+81 -0)
  40. vendor/github.com/containerd/go-cni/namespace_opts.go (+77 -0)
  41. vendor/github.com/containerd/go-cni/opts.go (+273 -0)
  42. vendor/github.com/containerd/go-cni/result.go (+114 -0)
  43. vendor/github.com/containerd/go-cni/testutils.go (+78 -0)
  44. vendor/github.com/containerd/go-cni/types.go (+65 -0)
  45. vendor/github.com/containernetworking/cni/LICENSE (+202 -0)
  46. vendor/github.com/containernetworking/cni/libcni/api.go (+679 -0)
  47. vendor/github.com/containernetworking/cni/libcni/conf.go (+270 -0)
  48. vendor/github.com/containernetworking/cni/pkg/invoke/args.go (+128 -0)
  49. vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go (+80 -0)
  50. vendor/github.com/containernetworking/cni/pkg/invoke/exec.go (+181 -0)
  51. vendor/github.com/containernetworking/cni/pkg/invoke/find.go (+48 -0)
  52. vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go (+20 -0)
  53. vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go (+18 -0)
  54. vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go (+88 -0)
  55. vendor/github.com/containernetworking/cni/pkg/types/020/types.go (+189 -0)
  56. vendor/github.com/containernetworking/cni/pkg/types/040/types.go (+306 -0)
  57. vendor/github.com/containernetworking/cni/pkg/types/100/types.go (+307 -0)
  58. vendor/github.com/containernetworking/cni/pkg/types/args.go (+122 -0)
  59. vendor/github.com/containernetworking/cni/pkg/types/create/create.go (+56 -0)
  60. vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go (+92 -0)
  61. vendor/github.com/containernetworking/cni/pkg/types/internal/create.go (+66 -0)
  62. vendor/github.com/containernetworking/cni/pkg/types/types.go (+234 -0)
  63. vendor/github.com/containernetworking/cni/pkg/utils/utils.go (+84 -0)
  64. vendor/github.com/containernetworking/cni/pkg/version/conf.go (+26 -0)
  65. vendor/github.com/containernetworking/cni/pkg/version/plugin.go (+144 -0)
  66. vendor/github.com/containernetworking/cni/pkg/version/reconcile.go (+49 -0)
  67. vendor/github.com/containernetworking/cni/pkg/version/version.go (+89 -0)
  68. vendor/github.com/dimchansky/utfbom/.gitignore (+37 -0)
  69. vendor/github.com/dimchansky/utfbom/.travis.yml (+29 -0)
  70. vendor/github.com/dimchansky/utfbom/LICENSE (+201 -0)
  71. vendor/github.com/dimchansky/utfbom/README.md (+66 -0)
  72. vendor/github.com/dimchansky/utfbom/utfbom.go (+192 -0)
  73. vendor/github.com/golang-jwt/jwt/v4/.gitignore (+4 -0)
  74. vendor/github.com/golang-jwt/jwt/v4/LICENSE (+9 -0)
  75. vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md (+22 -0)
  76. vendor/github.com/golang-jwt/jwt/v4/README.md (+138 -0)
  77. vendor/github.com/golang-jwt/jwt/v4/SECURITY.md (+19 -0)
  78. vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md (+135 -0)
  79. vendor/github.com/golang-jwt/jwt/v4/claims.go (+273 -0)
  80. vendor/github.com/golang-jwt/jwt/v4/doc.go (+4 -0)
  81. vendor/github.com/golang-jwt/jwt/v4/ecdsa.go (+142 -0)
  82. vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go (+69 -0)
  83. vendor/github.com/golang-jwt/jwt/v4/ed25519.go (+85 -0)
  84. vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go (+64 -0)
  85. vendor/github.com/golang-jwt/jwt/v4/errors.go (+112 -0)
  86. vendor/github.com/golang-jwt/jwt/v4/hmac.go (+95 -0)
  87. vendor/github.com/golang-jwt/jwt/v4/map_claims.go (+151 -0)
  88. vendor/github.com/golang-jwt/jwt/v4/none.go (+52 -0)
  89. vendor/github.com/golang-jwt/jwt/v4/parser.go (+170 -0)
  90. vendor/github.com/golang-jwt/jwt/v4/parser_option.go (+29 -0)
  91. vendor/github.com/golang-jwt/jwt/v4/rsa.go (+101 -0)
  92. vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go (+143 -0)
  93. vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go (+105 -0)
  94. vendor/github.com/golang-jwt/jwt/v4/signing_method.go (+46 -0)
  95. vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf (+1 -0)
  96. vendor/github.com/golang-jwt/jwt/v4/token.go (+127 -0)
  97. vendor/github.com/golang-jwt/jwt/v4/types.go (+145 -0)
  98. vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go (+384 -0)
  99. vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go (+461 -0)
  100. vendor/github.com/moby/buildkit/exporter/oci/export.go (+298 -0)

+ 31 - 15
builder/builder-next/builder.go

@@ -15,6 +15,8 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
+	mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
+	"github.com/docker/docker/builder/builder-next/exporter/overrides"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/images"
 	"github.com/docker/docker/libnetwork"
@@ -77,6 +79,10 @@ type Opt struct {
 	IdentityMapping     idtools.IdentityMapping
 	DNSConfig           config.DNSConfig
 	ApparmorProfile     string
+	UseSnapshotter      bool
+	Snapshotter         string
+	ContainerdAddress   string
+	ContainerdNamespace string
 }

 // Builder can build using BuildKit backend
@@ -85,15 +91,16 @@ type Builder struct {
 	dnsconfig      config.DNSConfig
 	reqBodyHandler *reqBodyHandler

-	mu   sync.Mutex
-	jobs map[string]*buildJob
+	mu             sync.Mutex
+	jobs           map[string]*buildJob
+	useSnapshotter bool
 }

 // New creates a new builder
-func New(opt Opt) (*Builder, error) {
+func New(ctx context.Context, opt Opt) (*Builder, error) {
 	reqHandler := newReqBodyHandler(tracing.DefaultTransport)

-	c, err := newController(reqHandler, opt)
+	c, err := newController(ctx, reqHandler, opt)
 	if err != nil {
 		return nil, err
 	}
@@ -102,6 +109,7 @@ func New(opt Opt) (*Builder, error) {
 		dnsconfig:      opt.DNSConfig,
 		reqBodyHandler: reqHandler,
 		jobs:           map[string]*buildJob{},
+		useSnapshotter: opt.UseSnapshotter,
 	}
 	return b, nil
 }
@@ -202,8 +210,11 @@ func (b *Builder) Prune(ctx context.Context, opts types.BuildCachePruneOptions)

 // Build executes a build request
 func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
-	var rc = opt.Source
+	if len(opt.Options.Outputs) > 1 {
+		return nil, errors.Errorf("multiple outputs not supported")
+	}

+	var rc = opt.Source
 	if buildID := opt.Options.BuildID; buildID != "" {
 		b.mu.Lock()

@@ -333,11 +344,12 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.

 	exporterName := ""
 	exporterAttrs := map[string]string{}
-
-	if len(opt.Options.Outputs) > 1 {
-		return nil, errors.Errorf("multiple outputs not supported")
-	} else if len(opt.Options.Outputs) == 0 {
-		exporterName = "moby"
+	if len(opt.Options.Outputs) == 0 {
+		if b.useSnapshotter {
+			exporterName = client.ExporterImage
+		} else {
+			exporterName = mobyexporter.Moby
+		}
 	} else {
 		// cacheonly is a special type for triggering skipping all exporters
 		if opt.Options.Outputs[0].Type != "cacheonly" {
@@ -346,14 +358,18 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 		}
 	}

-	if exporterName == "moby" {
-		if len(opt.Options.Tags) > 0 {
-			exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",")
+	if (exporterName == client.ExporterImage || exporterName == mobyexporter.Moby) && len(opt.Options.Tags) > 0 {
+		nameAttr, err := overrides.SanitizeRepoAndTags(opt.Options.Tags)
+		if err != nil {
+			return nil, err
+		}
+		if exporterAttrs == nil {
+			exporterAttrs = make(map[string]string)
 		}
+		exporterAttrs["name"] = strings.Join(nameAttr, ",")
 	}

 	cache := controlapi.CacheOptions{}
-
 	if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil {
 		if b, err := strconv.ParseBool(*inlineCache); err == nil && b {
 			cache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{
@@ -385,7 +401,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 		if err != nil {
 			return err
 		}
-		if exporterName != "moby" {
+		if exporterName != mobyexporter.Moby && exporterName != client.ExporterImage {
 			return nil
 		}
 		id, ok := resp.ExporterResponse["containerimage.digest"]

+ 123 - 17
builder/builder-next/controller.go

@@ -5,7 +5,9 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"time"

+	ctd "github.com/containerd/containerd"
 	"github.com/containerd/containerd/content/local"
 	ctdmetadata "github.com/containerd/containerd/metadata"
 	"github.com/containerd/containerd/snapshots"
@@ -14,7 +16,8 @@ import (
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
 	"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
 	"github.com/docker/docker/builder/builder-next/adapters/snapshot"
-	containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
+	"github.com/docker/docker/builder/builder-next/exporter"
+	"github.com/docker/docker/builder/builder-next/exporter/mobyexporter"
 	"github.com/docker/docker/builder/builder-next/imagerefchecker"
 	mobyworker "github.com/docker/docker/builder/builder-next/worker"
 	"github.com/docker/docker/daemon/config"
@@ -23,8 +26,10 @@ import (
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/cache/remotecache/gha"
 	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
 	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
+	registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
 	"github.com/moby/buildkit/client"
 	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/moby/buildkit/control"
@@ -37,13 +42,122 @@ import (
 	"github.com/moby/buildkit/util/archutil"
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/leaseutil"
+	"github.com/moby/buildkit/util/network/netproviders"
 	"github.com/moby/buildkit/worker"
+	"github.com/moby/buildkit/worker/containerd"
+	"github.com/moby/buildkit/worker/label"
 	"github.com/pkg/errors"
 	"go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
 )

-func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
+func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
+	if opt.UseSnapshotter {
+		return newSnapshotterController(ctx, rt, opt)
+	}
+	return newGraphDriverController(ctx, rt, opt)
+}
+
+func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
+	if err := os.MkdirAll(opt.Root, 0o711); err != nil {
+		return nil, err
+	}
+
+	historyDB, historyConf, err := openHistoryDB(opt.Root, opt.BuilderConfig.History)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
+	if err != nil {
+		return nil, err
+	}
+
+	nc := netproviders.Opt{
+		Mode: "host",
+	}
+	dns := getDNSConfig(opt.DNSConfig)
+
+	wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
+		opt.Rootless, map[string]string{
+			label.Snapshotter: opt.Snapshotter,
+		}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
+	if err != nil {
+		return nil, err
+	}
+
+	policy, err := getGCPolicy(opt.BuilderConfig, opt.Root)
+	if err != nil {
+		return nil, err
+	}
+
+	wo.GCPolicy = policy
+	wo.RegistryHosts = opt.RegistryHosts
+
+	exec, err := newExecutor(opt.Root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile)
+	if err != nil {
+		return nil, err
+	}
+	wo.Executor = exec
+
+	w, err := mobyworker.NewContainerdWorker(ctx, wo)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := &worker.Controller{}
+
+	err = wc.Add(w)
+	if err != nil {
+		return nil, err
+	}
+	frontends := map[string]frontend.Frontend{
+		"dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
+		"gateway.v0":    gateway.NewGatewayFrontend(wc),
+	}
+
+	return control.NewController(control.Opt{
+		SessionManager:   opt.SessionManager,
+		WorkerController: wc,
+		Frontends:        frontends,
+		CacheKeyStorage:  cacheStorage,
+		ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
+			"gha":      gha.ResolveCacheImporterFunc(),
+			"local":    localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
+			"registry": registryremotecache.ResolveCacheImporterFunc(opt.SessionManager, wo.ContentStore, opt.RegistryHosts),
+		},
+		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
+			"gha":      gha.ResolveCacheExporterFunc(),
+			"inline":   inlineremotecache.ResolveCacheExporterFunc(),
+			"local":    localremotecache.ResolveCacheExporterFunc(opt.SessionManager),
+			"registry": registryremotecache.ResolveCacheExporterFunc(opt.SessionManager, opt.RegistryHosts),
+		},
+		Entitlements:  getEntitlements(opt.BuilderConfig),
+		HistoryDB:     historyDB,
+		HistoryConfig: historyConf,
+		LeaseManager:  wo.LeaseManager,
+		ContentStore:  wo.ContentStore,
+	})
+}
+
+func openHistoryDB(root string, cfg *config.BuilderHistoryConfig) (*bolt.DB, *bkconfig.HistoryConfig, error) {
+	db, err := bbolt.Open(filepath.Join(root, "history.db"), 0o600, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var conf *bkconfig.HistoryConfig
+	if cfg != nil {
+		conf = &bkconfig.HistoryConfig{
+			MaxAge:     cfg.MaxAge,
+			MaxEntries: cfg.MaxEntries,
+		}
+	}
+
+	return db, conf, nil
+}
+
+func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 	if err := os.MkdirAll(opt.Root, 0711); err != nil {
 		return nil, err
 	}
@@ -140,12 +254,12 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, err
 	}

-	differ, ok := snapshotter.(containerimageexp.Differ)
+	differ, ok := snapshotter.(mobyexporter.Differ)
 	if !ok {
 		return nil, errors.Errorf("snapshotter doesn't support differ")
 	}

-	exp, err := containerimageexp.New(containerimageexp.Opt{
+	exp, err := mobyexporter.New(mobyexporter.Opt{
 		ImageStore:     dist.ImageStore,
 		ReferenceStore: dist.ReferenceStore,
 		Differ:         differ,
@@ -159,7 +273,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, err
 	}

-	historyDB, err := bbolt.Open(filepath.Join(opt.Root, "history.db"), 0o600, nil)
+	historyDB, historyConf, err := openHistoryDB(opt.Root, opt.BuilderConfig.History)
 	if err != nil {
 		return nil, err
 	}
@@ -174,16 +288,16 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, errors.Errorf("snapshotter doesn't support differ")
 	}

-	leases, err := lm.List(context.TODO(), "labels.\"buildkit/lease.temporary\"")
+	leases, err := lm.List(ctx, "labels.\"buildkit/lease.temporary\"")
 	if err != nil {
 		return nil, err
 	}
 	for _, l := range leases {
-		lm.Delete(context.TODO(), l)
+		lm.Delete(ctx, l)
 	}

 	wopt := mobyworker.Opt{
-		ID:                "moby",
+		ID:                exporter.Moby,
 		ContentStore:      store,
 		CacheManager:      cm,
 		GCPolicy:          gcPolicy,
@@ -211,14 +325,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		"gateway.v0":    gateway.NewGatewayFrontend(wc),
 	}

-	var hconf *bkconfig.HistoryConfig
-	if opt.BuilderConfig.History != nil {
-		hconf = &bkconfig.HistoryConfig{
-			MaxAge:     opt.BuilderConfig.History.MaxAge,
-			MaxEntries: opt.BuilderConfig.History.MaxEntries,
-		}
-	}
-
 	return control.NewController(control.Opt{
 		SessionManager:   opt.SessionManager,
 		WorkerController: wc,
@@ -235,7 +341,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		LeaseManager:  lm,
 		ContentStore:  store,
 		HistoryDB:     historyDB,
-		HistoryConfig: hconf,
+		HistoryConfig: historyConf,
 	})
 }
 
 

+ 3 - 0
builder/builder-next/exporter/exporter.go

@@ -0,0 +1,3 @@
+package exporter
+
+const Moby = "moby"

+ 1 - 1
builder/builder-next/exporter/export.go → builder/builder-next/exporter/mobyexporter/export.go

@@ -1,4 +1,4 @@
-package containerimage
+package mobyexporter

 import (
 	"context"

+ 1 - 1
builder/builder-next/exporter/writer.go → builder/builder-next/exporter/mobyexporter/writer.go

@@ -1,4 +1,4 @@
-package containerimage
+package mobyexporter

 import (
 	"context"

+ 34 - 0
builder/builder-next/exporter/overrides/overrides.go

@@ -0,0 +1,34 @@
+package overrides
+
+import (
+	"errors"
+
+	"github.com/docker/distribution/reference"
+)
+
+// SanitizeRepoAndTags parses the raw names to a slice of repoAndTag.
+// It removes duplicates and validates each repoName and tag to not contain a digest.
+func SanitizeRepoAndTags(names []string) (repoAndTags []string, err error) {
+	uniqNames := map[string]struct{}{}
+	for _, repo := range names {
+		if repo == "" {
+			continue
+		}
+
+		ref, err := reference.ParseNormalizedNamed(repo)
+		if err != nil {
+			return nil, err
+		}
+
+		if _, ok := ref.(reference.Digested); ok {
+			return nil, errors.New("build tag cannot contain a digest")
+		}
+
+		nameWithTag := reference.TagNameOnly(ref).String()
+		if _, exists := uniqNames[nameWithTag]; !exists {
+			uniqNames[nameWithTag] = struct{}{}
+			repoAndTags = append(repoAndTags, nameWithTag)
+		}
+	}
+	return repoAndTags, nil
+}

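For illustration (not part of the diff), a minimal sketch of how the new SanitizeRepoAndTags helper behaves, assuming the normalization rules of the vendored docker/distribution reference package: short names gain the implicit docker.io/library/ prefix, a missing tag defaults to ":latest", empty entries are skipped, duplicates collapse, and any name carrying a digest is rejected.

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/docker/builder/builder-next/exporter/overrides"
	)

	func main() {
		// "ubuntu" and "docker.io/library/ubuntu:latest" normalize to the same
		// reference, so only one entry survives; the empty string is dropped.
		repoAndTags, err := overrides.SanitizeRepoAndTags([]string{"ubuntu", "docker.io/library/ubuntu:latest", ""})
		if err != nil {
			// Returned when a name contains a digest, e.g. "repo@sha256:...".
			log.Fatal(err)
		}
		fmt.Println(repoAndTags) // [docker.io/library/ubuntu:latest]
	}
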
+ 33 - 0
builder/builder-next/exporter/overrides/wrapper.go

@@ -0,0 +1,33 @@
+package overrides
+
+import (
+	"context"
+	"strings"
+
+	"github.com/moby/buildkit/exporter"
+)
+
+// Wraps the containerimage exporter's Resolve method to apply moby-specific
+// overrides to the exporter attributes.
+type imageExporterMobyWrapper struct {
+	exp exporter.Exporter
+}
+
+func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
+	return &imageExporterMobyWrapper{exp: exp}, nil
+}
+
+// Resolve applies moby specific attributes to the request.
+func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
+	if exporterAttrs == nil {
+		exporterAttrs = make(map[string]string)
+	}
+	reposAndTags, err := SanitizeRepoAndTags(strings.Split(exporterAttrs["name"], ","))
+	if err != nil {
+		return nil, err
+	}
+	exporterAttrs["name"] = strings.Join(reposAndTags, ",")
+	exporterAttrs["unpack"] = "true"
+
+	return e.exp.Resolve(ctx, exporterAttrs)
+}

+ 40 - 0
builder/builder-next/worker/containerdworker.go

@@ -0,0 +1,40 @@
+package worker
+
+import (
+	"context"
+
+	mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
+	"github.com/docker/docker/builder/builder-next/exporter/overrides"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/worker/base"
+)
+
+// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on.
+type ContainerdWorker struct {
+	*base.Worker
+}
+
+// NewContainerdWorker instantiates a local worker.
+func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt) (*ContainerdWorker, error) {
+	bw, err := base.NewWorker(ctx, wo)
+	if err != nil {
+		return nil, err
+	}
+	return &ContainerdWorker{Worker: bw}, nil
+}
+
+// Exporter returns exporter by name
+func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
+	switch name {
+	case mobyexporter.Moby:
+		exp, err := w.Worker.Exporter(client.ExporterImage, sm)
+		if err != nil {
+			return nil, err
+		}
+		return overrides.NewExporterWrapper(exp)
+	default:
+		return w.Worker.Exporter(name, sm)
+	}
+}

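A hypothetical sketch (not part of the diff) tying the two new pieces together: asking the containerd-backed worker for the "moby" exporter yields BuildKit's image exporter wrapped by the overrides package, so Resolve normalizes the "name" attribute and forces "unpack" to "true" before delegating. The helper name and the "myapp" tag below are illustrative only.

	package example

	import (
		"context"

		mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
		buildworker "github.com/docker/docker/builder/builder-next/worker"
		"github.com/moby/buildkit/exporter"
		"github.com/moby/buildkit/session"
	)

	// resolveMobyImageExporter resolves the "moby" exporter on a ContainerdWorker,
	// which transparently returns the wrapped image exporter shown above.
	func resolveMobyImageExporter(ctx context.Context, w *buildworker.ContainerdWorker, sm *session.Manager) (exporter.ExporterInstance, error) {
		exp, err := w.Exporter(mobyexporter.Moby, sm)
		if err != nil {
			return nil, err
		}
		// The wrapper rewrites the attributes before the wrapped Resolve runs:
		// "name" becomes "docker.io/library/myapp:latest", "unpack" becomes "true".
		return exp.Resolve(ctx, map[string]string{"name": "myapp"})
	}
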
+ 2 - 1
builder/builder-next/worker/worker.go

@@ -13,6 +13,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
+	mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
 	distmetadata "github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
@@ -248,7 +249,7 @@ func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...cl
 // Exporter returns exporter by name
 func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
 	switch name {
-	case "moby":
+	case mobyexporter.Moby:
 		return w.Opt.Exporter, nil
 	case client.ExporterLocal:
 		return localexporter.New(localexporter.Opt{

+ 31 - 25
cmd/dockerd/daemon.go

@@ -222,7 +222,10 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {

 	logrus.Info("Daemon has completed initialization")

-	routerOptions, err := newRouterOptions(cli.Config, d)
+	routerCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	routerOptions, err := newRouterOptions(routerCtx, cli.Config, d)
 	if err != nil {
 		return err
 	}
@@ -272,7 +275,7 @@ type routerOptions struct {
 	cluster        *cluster.Cluster
 }

-func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
+func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daemon) (routerOptions, error) {
 	opts := routerOptions{}
 	sm, err := session.NewManager()
 	if err != nil {
@@ -289,33 +292,36 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e
 		features:       d.Features(),
 		daemon:         d,
 	}
-	if !d.UsesSnapshotter() {
-		bk, err := buildkit.New(buildkit.Opt{
-			SessionManager:      sm,
-			Root:                filepath.Join(config.Root, "buildkit"),
-			Dist:                d.DistributionServices(),
-			NetworkController:   d.NetworkController(),
-			DefaultCgroupParent: cgroupParent,
-			RegistryHosts:       d.RegistryHosts(),
-			BuilderConfig:       config.Builder,
-			Rootless:            d.Rootless(),
-			IdentityMapping:     d.IdentityMapping(),
-			DNSConfig:           config.DNSConfig,
-			ApparmorProfile:     daemon.DefaultApparmorProfile(),
-		})
-		if err != nil {
-			return opts, err
-		}

-		bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
-		if err != nil {
-			return opts, errors.Wrap(err, "failed to create buildmanager")
-		}
+	bk, err := buildkit.New(ctx, buildkit.Opt{
+		SessionManager:      sm,
+		Root:                filepath.Join(config.Root, "buildkit"),
+		Dist:                d.DistributionServices(),
+		NetworkController:   d.NetworkController(),
+		DefaultCgroupParent: cgroupParent,
+		RegistryHosts:       d.RegistryHosts(),
+		BuilderConfig:       config.Builder,
+		Rootless:            d.Rootless(),
+		IdentityMapping:     d.IdentityMapping(),
+		DNSConfig:           config.DNSConfig,
+		ApparmorProfile:     daemon.DefaultApparmorProfile(),
+		UseSnapshotter:      d.UsesSnapshotter(),
+		Snapshotter:         d.ImageService().StorageDriver(),
+		ContainerdAddress:   config.ContainerdAddr,
+		ContainerdNamespace: config.ContainerdNamespace,
+	})
+	if err != nil {
+		return opts, err
+	}

-		ro.buildBackend = bb
-		ro.buildkit = bk
+	bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
+	if err != nil {
+		return opts, errors.Wrap(err, "failed to create buildmanager")
 	}

+	ro.buildBackend = bb
+	ro.buildkit = bk
+
 	return ro, nil
 }
 
 

+ 6 - 1
vendor.mod

@@ -116,11 +116,14 @@ require (
 	github.com/cilium/ebpf v0.7.0 // indirect
 	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
+	github.com/containerd/go-cni v1.1.6 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
 	github.com/containerd/nydus-snapshotter v0.3.1 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
+	github.com/containernetworking/cni v1.1.1 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/felixge/httpsnoop v1.0.2 // indirect
 	github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect
@@ -128,6 +131,7 @@ require (
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/googleapis v1.4.1 // indirect
+	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.1.2 // indirect
@@ -146,7 +150,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
-	github.com/onsi/ginkgo v1.16.4 // indirect
+	github.com/onsi/ginkgo/v2 v2.1.4 // indirect
 	github.com/onsi/gomega v1.20.1 // indirect
 	github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
 	github.com/philhofer/fwd v1.1.2 // indirect
@@ -158,6 +162,7 @@ require (
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect
 	github.com/tinylib/msgp v1.1.6 // indirect
+	github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
 	go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect

File diff suppressed because it is too large
+ 345 - 0
vendor.sum


+ 328 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcn.go

@@ -0,0 +1,328 @@
+// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
+// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS).
+package hcn
+
+import (
+	"encoding/json"
+	"fmt"
+	"syscall"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+)
+
+//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go
+
+/// HNS V1 API
+
+//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
+//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
+
+/// HCN V2 API
+
+// Network
+//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks?
+//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork?
+//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork?
+//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork?
+//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties?
+//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork?
+//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork?
+
+// Endpoint
+//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints?
+//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint?
+//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint?
+//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint?
+//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties?
+//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint?
+//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint?
+
+// Namespace
+//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces?
+//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace?
+//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace?
+//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace?
+//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties?
+//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace?
+//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace?
+
+// LoadBalancer
+//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers?
+//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer?
+//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer?
+//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer?
+//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties?
+//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer?
+//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer?
+
+// SDN Routes
+//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes?
+//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute?
+//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute?
+//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute?
+//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties?
+//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute?
+//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute?
+
+type _guid = guid.GUID
+
+type hcnNetwork syscall.Handle
+type hcnEndpoint syscall.Handle
+type hcnNamespace syscall.Handle
+type hcnLoadBalancer syscall.Handle
+type hcnRoute syscall.Handle
+
+// SchemaVersion for HCN Objects/Queries.
+type SchemaVersion = Version // hcnglobals.go
+
+// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which
+// properties of an object are returned.
+type HostComputeQueryFlags uint32
+
+var (
+	// HostComputeQueryFlagsNone returns an object with the standard properties.
+	HostComputeQueryFlagsNone HostComputeQueryFlags
+	// HostComputeQueryFlagsDetailed returns an object with all properties.
+	HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1
+)
+
+// HostComputeQuery is the format for HCN queries.
+type HostComputeQuery struct {
+	SchemaVersion SchemaVersion         `json:""`
+	Flags         HostComputeQueryFlags `json:",omitempty"`
+	Filter        string                `json:",omitempty"`
+}
+
+type ExtraParams struct {
+	Resources        json.RawMessage `json:",omitempty"`
+	SharedContainers json.RawMessage `json:",omitempty"`
+	LayeredOn        string          `json:",omitempty"`
+	SwitchGuid       string          `json:",omitempty"`
+	UtilityVM        string          `json:",omitempty"`
+	VirtualMachine   string          `json:",omitempty"`
+}
+
+type Health struct {
+	Data  interface{} `json:",omitempty"`
+	Extra ExtraParams `json:",omitempty"`
+}
+
+// defaultQuery generates HCN Query.
+// Passed into get/enumerate calls to filter results.
+func defaultQuery() HostComputeQuery {
+	query := HostComputeQuery{
+		SchemaVersion: SchemaVersion{
+			Major: 2,
+			Minor: 0,
+		},
+		Flags: HostComputeQueryFlagsNone,
+	}
+	return query
+}
+
+// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS
+func platformDoesNotSupportError(featureName string) error {
+	return fmt.Errorf("platform does not support feature %s", featureName)
+}
+
+// V2ApiSupported returns an error if the HCN version does not support the V2 Apis.
+func V2ApiSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.Api.V2 {
+		return nil
+	}
+	return platformDoesNotSupportError("V2 Api/Schema")
+}
+
+func V2SchemaVersion() SchemaVersion {
+	return SchemaVersion{
+		Major: 2,
+		Minor: 0,
+	}
+}
+
+// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies.
+func RemoteSubnetSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.RemoteSubnet {
+		return nil
+	}
+	return platformDoesNotSupportError("Remote Subnet")
+}
+
+// HostRouteSupported returns an error if the HCN version does not support Host Route policies.
+func HostRouteSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.HostRoute {
+		return nil
+	}
+	return platformDoesNotSupportError("Host Route")
+}
+
+// DSRSupported returns an error if the HCN version does not support Direct Server Return.
+func DSRSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.DSR {
+		return nil
+	}
+	return platformDoesNotSupportError("Direct Server Return (DSR)")
+}
+
+// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes.
+func Slash32EndpointPrefixesSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.Slash32EndpointPrefixes {
+		return nil
+	}
+	return platformDoesNotSupportError("Slash 32 Endpoint prefixes")
+}
+
+// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN.
+func AclSupportForProtocol252Supported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.AclSupportForProtocol252 {
+		return nil
+	}
+	return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN")
+}
+
+// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity.
+func SessionAffinitySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.SessionAffinity {
+		return nil
+	}
+	return platformDoesNotSupportError("Session Affinity")
+}
+
+// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack.
+func IPv6DualStackSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.IPv6DualStack {
+		return nil
+	}
+	return platformDoesNotSupportError("IPv6 DualStack")
+}
+
+// L4proxySupported returns an error if the HCN version does not support L4Proxy
+func L4proxyPolicySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.L4Proxy {
+		return nil
+	}
+	return platformDoesNotSupportError("L4ProxyPolicy")
+}
+
+// L4WfpProxySupported returns an error if the HCN version does not support L4WfpProxy
+func L4WfpProxyPolicySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.L4WfpProxy {
+		return nil
+	}
+	return platformDoesNotSupportError("L4WfpProxyPolicy")
+}
+
+// SetPolicySupported returns an error if the HCN version does not support SetPolicy.
+func SetPolicySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.SetPolicy {
+		return nil
+	}
+	return platformDoesNotSupportError("SetPolicy")
+}
+
+// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port.
+func VxlanPortSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.VxlanPort {
+		return nil
+	}
+	return platformDoesNotSupportError("VXLAN port configuration")
+}
+
+// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl.
+func TierAclPolicySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.TierAcl {
+		return nil
+	}
+	return platformDoesNotSupportError("TierAcl")
+}
+
+// NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy
+func NetworkACLPolicySupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.NetworkACL {
+		return nil
+	}
+	return platformDoesNotSupportError("NetworkACL")
+}
+
+// NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet
+func NestedIpSetSupported() error {
+	supported, err := GetCachedSupportedFeatures()
+	if err != nil {
+		return err
+	}
+	if supported.NestedIpSet {
+		return nil
+	}
+	return platformDoesNotSupportError("NestedIpSet")
+}
+
+// RequestType are the different operations performed to settings.
+// Used to update the settings of Endpoint/Namespace objects.
+type RequestType string
+
+var (
+	// RequestTypeAdd adds the provided settings object.
+	RequestTypeAdd RequestType = "Add"
+	// RequestTypeRemove removes the provided settings object.
+	RequestTypeRemove RequestType = "Remove"
+	// RequestTypeUpdate replaces settings with the ones provided.
+	RequestTypeUpdate RequestType = "Update"
+	// RequestTypeRefresh refreshes the settings provided.
+	RequestTypeRefresh RequestType = "Refresh"
+)

+ 388 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go

@@ -0,0 +1,388 @@
+package hcn
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// IpConfig is associated with an endpoint
+type IpConfig struct {
+	IpAddress    string `json:",omitempty"`
+	PrefixLength uint8  `json:",omitempty"`
+}
+
+// EndpointFlags are special settings on an endpoint.
+type EndpointFlags uint32
+
+var (
+	// EndpointFlagsNone is the default.
+	EndpointFlagsNone EndpointFlags
+	// EndpointFlagsRemoteEndpoint means that an endpoint is on another host.
+	EndpointFlagsRemoteEndpoint EndpointFlags = 1
+)
+
+// HostComputeEndpoint represents a network endpoint
+type HostComputeEndpoint struct {
+	Id                   string           `json:"ID,omitempty"`
+	Name                 string           `json:",omitempty"`
+	HostComputeNetwork   string           `json:",omitempty"` // GUID
+	HostComputeNamespace string           `json:",omitempty"` // GUID
+	Policies             []EndpointPolicy `json:",omitempty"`
+	IpConfigurations     []IpConfig       `json:",omitempty"`
+	Dns                  Dns              `json:",omitempty"`
+	Routes               []Route          `json:",omitempty"`
+	MacAddress           string           `json:",omitempty"`
+	Flags                EndpointFlags    `json:",omitempty"`
+	Health               Health           `json:",omitempty"`
+	SchemaVersion        SchemaVersion    `json:",omitempty"`
+}
+
+// EndpointResourceType are the two different Endpoint settings resources.
+type EndpointResourceType string
+
+var (
+	// EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT
+	EndpointResourceTypePolicy EndpointResourceType = "Policy"
+	// EndpointResourceTypePort is for Endpoint Port settings.
+	EndpointResourceTypePort EndpointResourceType = "Port"
+)
+
+// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint.
+// Used to update policy/port on an endpoint.
+type ModifyEndpointSettingRequest struct {
+	ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port
+	RequestType  RequestType          `json:",omitempty"` // Add, Remove, Update, Refresh
+	Settings     json.RawMessage      `json:",omitempty"`
+}
+
+// VmEndpointRequest creates a switch port with identifier `PortId`.
+type VmEndpointRequest struct {
+	PortId           guid.GUID `json:",omitempty"`
+	VirtualNicName   string    `json:",omitempty"`
+	VirtualMachineId guid.GUID `json:",omitempty"`
+}
+
+type PolicyEndpointRequest struct {
+	Policies []EndpointPolicy `json:",omitempty"`
+}
+
+func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) {
+	// Open endpoint.
+	var (
+		endpointHandle   hcnEndpoint
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query endpoint.
+	hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close endpoint.
+	hr = hcnCloseEndpoint(endpointHandle)
+	if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeEndpoint
+	var outputEndpoint HostComputeEndpoint
+	if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
+		return nil, err
+	}
+	return &outputEndpoint, nil
+}
+
+func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) {
+	// Enumerate all Endpoint Guids
+	var (
+		resultBuffer   *uint16
+		endpointBuffer *uint16
+	)
+	hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer)
+	if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+
+	endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer)
+	var endpointIds []guid.GUID
+	err := json.Unmarshal([]byte(endpoints), &endpointIds)
+	if err != nil {
+		return nil, err
+	}
+
+	var outputEndpoints []HostComputeEndpoint
+	for _, endpointGuid := range endpointIds {
+		endpoint, err := getEndpoint(endpointGuid, query)
+		if err != nil {
+			return nil, err
+		}
+		outputEndpoints = append(outputEndpoints, *endpoint)
+	}
+	return outputEndpoints, nil
+}
+
+func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) {
+	networkGuid, err := guid.FromString(networkId)
+	if err != nil {
+		return nil, errInvalidNetworkID
+	}
+	// Open network.
+	var networkHandle hcnNetwork
+	var resultBuffer *uint16
+	hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Create endpoint.
+	endpointId := guid.GUID{}
+	var endpointHandle hcnEndpoint
+	hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer)
+	if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query endpoint.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	var propertiesBuffer *uint16
+	hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close endpoint.
+	hr = hcnCloseEndpoint(endpointHandle)
+	if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
+		return nil, err
+	}
+	// Close network.
+	hr = hcnCloseNetwork(networkHandle)
+	if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeEndpoint
+	var outputEndpoint HostComputeEndpoint
+	if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
+		return nil, err
+	}
+	return &outputEndpoint, nil
+}
+
+func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) {
+	endpointGuid, err := guid.FromString(endpointId)
+	if err != nil {
+		return nil, errInvalidEndpointID
+	}
+	// Open endpoint
+	var (
+		endpointHandle   hcnEndpoint
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Modify endpoint
+	hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer)
+	if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query endpoint.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close endpoint.
+	hr = hcnCloseEndpoint(endpointHandle)
+	if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeEndpoint
+	var outputEndpoint HostComputeEndpoint
+	if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil {
+		return nil, err
+	}
+	return &outputEndpoint, nil
+}
+
+func deleteEndpoint(endpointId string) error {
+	endpointGuid, err := guid.FromString(endpointId)
+	if err != nil {
+		return errInvalidEndpointID
+	}
+	var resultBuffer *uint16
+	hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer)
+	if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListEndpoints makes a call to list all available endpoints.
+func ListEndpoints() ([]HostComputeEndpoint, error) {
+	hcnQuery := defaultQuery()
+	endpoints, err := ListEndpointsQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return endpoints, nil
+}
+
+// ListEndpointsQuery makes a call to query the list of available endpoints.
+func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) {
+	queryJson, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoints, err := enumerateEndpoints(string(queryJson))
+	if err != nil {
+		return nil, err
+	}
+	return endpoints, nil
+}
+
+// ListEndpointsOfNetwork queries the list of endpoints on a network.
+func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) {
+	hcnQuery := defaultQuery()
+	// TODO: Once query can convert schema, change to {HostComputeNetwork:networkId}
+	mapA := map[string]string{"VirtualNetwork": networkId}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	return ListEndpointsQuery(hcnQuery)
+}
+
+// GetEndpointByID returns an endpoint specified by Id
+func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": endpointId}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	endpoints, err := ListEndpointsQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(endpoints) == 0 {
+		return nil, EndpointNotFoundError{EndpointID: endpointId}
+	}
+	return &endpoints[0], err
+}
+
+// GetEndpointByName returns an endpoint specified by Name
+func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"Name": endpointName}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	endpoints, err := ListEndpointsQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(endpoints) == 0 {
+		return nil, EndpointNotFoundError{EndpointName: endpointName}
+	}
+	return &endpoints[0], err
+}
+
+// Create Endpoint.
+func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) {
+	logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id)
+
+	if endpoint.HostComputeNamespace != "" {
+		return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set")
+	}
+
+	jsonString, err := json.Marshal(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString)
+	endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return endpoint, nil
+}
+
+// Delete Endpoint.
+func (endpoint *HostComputeEndpoint) Delete() error {
+	logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id)
+
+	if err := deleteEndpoint(endpoint.Id); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ModifyEndpointSettings updates the Port/Policy of an Endpoint.
+func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error {
+	logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId)
+
+	endpointSettingsRequest, err := json.Marshal(request)
+	if err != nil {
+		return err
+	}
+
+	_, err = modifyEndpoint(endpointId, string(endpointSettingsRequest))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint.
+func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error {
+	logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id)
+
+	settingsJson, err := json.Marshal(endpointPolicy)
+	if err != nil {
+		return err
+	}
+	requestMessage := &ModifyEndpointSettingRequest{
+		ResourceType: EndpointResourceTypePolicy,
+		RequestType:  requestType,
+		Settings:     settingsJson,
+	}
+
+	return ModifyEndpointSettings(endpoint.Id, requestMessage)
+}
+
+// NamespaceAttach modifies a Namespace to add an endpoint.
+func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error {
+	return AddNamespaceEndpoint(namespaceId, endpoint.Id)
+}
+
+// NamespaceDetach modifies a Namespace to remove an endpoint.
+func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error {
+	return RemoveNamespaceEndpoint(namespaceId, endpoint.Id)
+}
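
For orientation, a minimal caller-side sketch of the endpoint helpers above; it is illustrative only (not part of the vendored file), Windows-only in practice, and the endpoint name and namespace GUID are placeholders:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// Resolve an existing endpoint by its friendly name (placeholder value).
	endpoint, err := hcn.GetEndpointByName("example-endpoint")
	if err != nil {
		log.Fatalf("endpoint lookup failed: %v", err)
	}

	// Attach the endpoint to an existing namespace (placeholder GUID).
	if err := endpoint.NamespaceAttach("00000000-0000-0000-0000-000000000000"); err != nil {
		log.Fatalf("namespace attach failed: %v", err)
	}
}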

+ 164 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go

@@ -0,0 +1,164 @@
+// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server
+// containers and Hyper-V containers. Prior to RS5, HCN was referred to as the Host Networking Service (HNS).
+package hcn
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/Microsoft/hcsshim/internal/hcs"
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	errInvalidNetworkID      = errors.New("invalid network ID")
+	errInvalidEndpointID     = errors.New("invalid endpoint ID")
+	errInvalidNamespaceID    = errors.New("invalid namespace ID")
+	errInvalidLoadBalancerID = errors.New("invalid load balancer ID")
+	errInvalidRouteID        = errors.New("invalid route ID")
+)
+
+func checkForErrors(methodName string, hr error, resultBuffer *uint16) error {
+	errorFound := false
+
+	if hr != nil {
+		errorFound = true
+	}
+
+	result := ""
+	if resultBuffer != nil {
+		result = interop.ConvertAndFreeCoTaskMemString(resultBuffer)
+		if result != "" {
+			errorFound = true
+		}
+	}
+
+	if errorFound {
+		returnError := new(hr, methodName, result)
+		logrus.Debug(returnError.Error()) // HCN errors logged for debugging.
+		return returnError
+	}
+
+	return nil
+}
+
+type ErrorCode uint32
+
+// For common errors, define the error as it is in Windows, so we can quickly determine it later.
+const (
+	ERROR_NOT_FOUND                     = 0x490
+	HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013
+)
+
+type HcnError struct {
+	*hcserror.HcsError
+	code ErrorCode
+}
+
+func (e *HcnError) Error() string {
+	return e.HcsError.Error()
+}
+
+func CheckErrorWithCode(err error, code ErrorCode) bool {
+	hcnError, ok := err.(*HcnError)
+	if ok {
+		return hcnError.code == code
+	}
+	return false
+}
+
+func IsElementNotFoundError(err error) bool {
+	return CheckErrorWithCode(err, ERROR_NOT_FOUND)
+}
+
+func IsPortAlreadyExistsError(err error) bool {
+	return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS)
+}
+
+func new(hr error, title string, rest string) error {
+	err := &HcnError{}
+	hcsError := hcserror.New(hr, title, rest)
+	err.HcsError = hcsError.(*hcserror.HcsError)
+	err.code = ErrorCode(hcserror.Win32FromError(hr))
+	return err
+}
+
+//
+// Note that the errors below are not returned by hcn itself;
+// we keep them separate because they are shim usage errors.
+//
+
+// NetworkNotFoundError results from a failed search for a network by Id or Name
+type NetworkNotFoundError struct {
+	NetworkName string
+	NetworkID   string
+}
+
+func (e NetworkNotFoundError) Error() string {
+	if e.NetworkName != "" {
+		return fmt.Sprintf("Network name %q not found", e.NetworkName)
+	}
+	return fmt.Sprintf("Network ID %q not found", e.NetworkID)
+}
+
+// EndpointNotFoundError results from a failed search for an endpoint by Id or Name
+type EndpointNotFoundError struct {
+	EndpointName string
+	EndpointID   string
+}
+
+func (e EndpointNotFoundError) Error() string {
+	if e.EndpointName != "" {
+		return fmt.Sprintf("Endpoint name %q not found", e.EndpointName)
+	}
+	return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID)
+}
+
+// NamespaceNotFoundError results from a failed search for a namespace by Id
+type NamespaceNotFoundError struct {
+	NamespaceID string
+}
+
+func (e NamespaceNotFoundError) Error() string {
+	return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID)
+}
+
+// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id
+type LoadBalancerNotFoundError struct {
+	LoadBalancerId string
+}
+
+func (e LoadBalancerNotFoundError) Error() string {
+	return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId)
+}
+
+// RouteNotFoundError results from a failed search for a route by Id
+type RouteNotFoundError struct {
+	RouteId string
+}
+
+func (e RouteNotFoundError) Error() string {
+	return fmt.Sprintf("SDN Route %q not found", e.RouteId)
+}
+
+// IsNotFoundError returns a boolean indicating whether the error was caused by
+// a resource not being found.
+func IsNotFoundError(err error) bool {
+	switch pe := err.(type) {
+	case NetworkNotFoundError:
+		return true
+	case EndpointNotFoundError:
+		return true
+	case NamespaceNotFoundError:
+		return true
+	case LoadBalancerNotFoundError:
+		return true
+	case RouteNotFoundError:
+		return true
+	case *hcserror.HcsError:
+		return pe.Err == hcs.ErrElementNotFound
+	}
+	return false
+}
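
Callers typically branch on these helpers rather than on raw HRESULT strings; a hedged sketch of idempotent cleanup (the endpoint GUID is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

// ensureEndpointDeleted deletes an endpoint if it exists and treats
// "not found" as success.
func ensureEndpointDeleted(endpointID string) error {
	endpoint, err := hcn.GetEndpointByID(endpointID)
	if err != nil {
		if hcn.IsNotFoundError(err) {
			return nil // already gone, nothing to do
		}
		return fmt.Errorf("query endpoint %s: %w", endpointID, err)
	}
	return endpoint.Delete()
}

func main() {
	// Placeholder GUID; in practice this comes from an earlier Create call.
	if err := ensureEndpointDeleted("00000000-0000-0000-0000-000000000000"); err != nil {
		log.Fatal(err)
	}
}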

+ 138 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go

@@ -0,0 +1,138 @@
+package hcn
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// Globals are all global properties of the HCN Service.
+type Globals struct {
+	Version Version `json:"Version"`
+}
+
+// Version is the HCN Service version.
+type Version struct {
+	Major int `json:"Major"`
+	Minor int `json:"Minor"`
+}
+
+type VersionRange struct {
+	MinVersion Version
+	MaxVersion Version
+}
+
+type VersionRanges []VersionRange
+
+var (
+	// HNSVersion1803 added ACL functionality.
+	HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// V2ApiSupport allows the use of V2 Api calls and V2 Schema.
+	V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// Remote Subnet allows for Remote Subnet policies on Overlay networks
+	RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// A Host Route policy allows for local container to local host communication Overlay networks
+	HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing
+	DSRVersion = VersionRanges{
+		VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
+	}
+	// HNS 9.3 through 10.0 (not included) and 10.4+ provide support for configuring endpoints with /32 prefixes
+	Slash32EndpointPrefixesVersion = VersionRanges{
+		VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
+	}
+	// HNS 11.0+ allows for HNS ACL Policies to support protocol 252 for VXLAN
+	AclSupportForProtocol252Version = VersionRanges{
+		VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
+	}
+	// HNS 12.0 allows for session affinity for loadbalancing
+	SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// HNS 11.10+ supports Ipv6 dual stack.
+	IPv6DualStackVersion = VersionRanges{
+		VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
+	}
+	// HNS 13.0 allows for Set Policy support
+	SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+	// HNS 10.3 allows for VXLAN ports
+	VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+
+	// HNS 9.5 through 10.0 (not included), 10.5 through 11.0 (not included), 11.11 through 12.0 (not included), 12.1 through 13.0 (not included), and 13.1+ allow for Network L4Proxy Policy support
+	L4ProxyPolicyVersion = VersionRanges{
+		VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}},
+		VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}},
+	}
+
+	// HNS 13.2 allows for L4WfpProxy Policy support
+	L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+
+	// HNS 14.0 allows for TierAcl Policy support
+	TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+
+	// HNS 15.0 allows for NetworkACL Policy support
+	NetworkACLPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+
+	// HNS 15.0 allows for NestedIpSet support
+	NestedIpSetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}}
+)
+
+// GetGlobals returns the global properties of the HCN Service.
+func GetGlobals() (*Globals, error) {
+	var version Version
+	err := hnsCall("GET", "/globals/version", "", &version)
+	if err != nil {
+		return nil, err
+	}
+
+	globals := &Globals{
+		Version: version,
+	}
+
+	return globals, nil
+}
+
+type hnsResponse struct {
+	Success bool
+	Error   string
+	Output  json.RawMessage
+}
+
+func hnsCall(method, path, request string, returnResponse interface{}) error {
+	var responseBuffer *uint16
+	logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request)
+
+	err := _hnsCall(method, path, request, &responseBuffer)
+	if err != nil {
+		return hcserror.New(err, "hnsCall", "")
+	}
+	response := interop.ConvertAndFreeCoTaskMemString(responseBuffer)
+
+	hnsresponse := &hnsResponse{}
+	if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {
+		return err
+	}
+
+	if !hnsresponse.Success {
+		return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error)
+	}
+
+	if len(hnsresponse.Output) == 0 {
+		return nil
+	}
+
+	logrus.Debugf("Network Response : %s", hnsresponse.Output)
+	err = json.Unmarshal(hnsresponse.Output, returnResponse)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
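
A sketch of how the exported version ranges can be consumed; inRanges is a local helper written here for illustration, not an hcn API:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

// inRanges reports whether v falls inside any of the given version ranges.
// It is a local illustration helper, not part of the hcn package.
func inRanges(v hcn.Version, ranges hcn.VersionRanges) bool {
	for _, r := range ranges {
		atLeastMin := v.Major > r.MinVersion.Major ||
			(v.Major == r.MinVersion.Major && v.Minor >= r.MinVersion.Minor)
		atMostMax := v.Major < r.MaxVersion.Major ||
			(v.Major == r.MaxVersion.Major && v.Minor <= r.MaxVersion.Minor)
		if atLeastMin && atMostMax {
			return true
		}
	}
	return false
}

func main() {
	globals, err := hcn.GetGlobals()
	if err != nil {
		fmt.Println("query HCN globals:", err)
		return
	}
	fmt.Printf("HNS %d.%d, DSR supported: %v\n",
		globals.Version.Major, globals.Version.Minor,
		inRanges(globals.Version, hcn.DSRVersion))
}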

+ 311 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go

@@ -0,0 +1,311 @@
+package hcn
+
+import (
+	"encoding/json"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// LoadBalancerPortMapping is associated with HostComputeLoadBalancer
+type LoadBalancerPortMapping struct {
+	Protocol         uint32                       `json:",omitempty"` // EX: TCP = 6, UDP = 17
+	InternalPort     uint16                       `json:",omitempty"`
+	ExternalPort     uint16                       `json:",omitempty"`
+	DistributionType LoadBalancerDistribution     `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2
+	Flags            LoadBalancerPortMappingFlags `json:",omitempty"`
+}
+
+// HostComputeLoadBalancer represents a software load balancer.
+type HostComputeLoadBalancer struct {
+	Id                   string                    `json:"ID,omitempty"`
+	HostComputeEndpoints []string                  `json:",omitempty"`
+	SourceVIP            string                    `json:",omitempty"`
+	FrontendVIPs         []string                  `json:",omitempty"`
+	PortMappings         []LoadBalancerPortMapping `json:",omitempty"`
+	SchemaVersion        SchemaVersion             `json:",omitempty"`
+	Flags                LoadBalancerFlags         `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn
+}
+
+// LoadBalancerFlags modify settings for a loadbalancer.
+type LoadBalancerFlags uint32
+
+var (
+	// LoadBalancerFlagsNone is the default.
+	LoadBalancerFlagsNone LoadBalancerFlags = 0
+	// LoadBalancerFlagsDSR enables Direct Server Return (DSR)
+	LoadBalancerFlagsDSR LoadBalancerFlags = 1
+	LoadBalancerFlagsIPv6 LoadBalancerFlags = 2
+)
+
+// LoadBalancerPortMappingFlags are special settings on a loadbalancer.
+type LoadBalancerPortMappingFlags uint32
+
+var (
+	// LoadBalancerPortMappingFlagsNone is the default.
+	LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags
+	// LoadBalancerPortMappingFlagsILB enables internal loadbalancing.
+	LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1
+	// LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host.
+	LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2
+	// LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP.
+	LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4
+	// LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP.
+	LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8
+)
+
+// LoadBalancerDistribution specifies how the loadbalancer distributes traffic.
+type LoadBalancerDistribution uint32
+
+var (
+	// LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod.
+	LoadBalancerDistributionNone LoadBalancerDistribution
+	// LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod.
+	LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1
+	// LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod.
+	LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2
+)
+
+func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) {
+	// Open loadBalancer.
+	var (
+		loadBalancerHandle hcnLoadBalancer
+		resultBuffer       *uint16
+		propertiesBuffer   *uint16
+	)
+	hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query loadBalancer.
+	hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close loadBalancer.
+	hr = hcnCloseLoadBalancer(loadBalancerHandle)
+	if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeLoadBalancer
+	var outputLoadBalancer HostComputeLoadBalancer
+	if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil {
+		return nil, err
+	}
+	return &outputLoadBalancer, nil
+}
+
+func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) {
+	// Enumerate all LoadBalancer Guids
+	var (
+		resultBuffer       *uint16
+		loadBalancerBuffer *uint16
+	)
+	hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer)
+	if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+
+	loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer)
+	var loadBalancerIds []guid.GUID
+	if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil {
+		return nil, err
+	}
+
+	var outputLoadBalancers []HostComputeLoadBalancer
+	for _, loadBalancerGuid := range loadBalancerIds {
+		loadBalancer, err := getLoadBalancer(loadBalancerGuid, query)
+		if err != nil {
+			return nil, err
+		}
+		outputLoadBalancers = append(outputLoadBalancers, *loadBalancer)
+	}
+	return outputLoadBalancers, nil
+}
+
+func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) {
+	// Create new loadBalancer.
+	var (
+		loadBalancerHandle hcnLoadBalancer
+		resultBuffer       *uint16
+		propertiesBuffer   *uint16
+	)
+	loadBalancerGuid := guid.GUID{}
+	hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer)
+	if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query loadBalancer.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close loadBalancer.
+	hr = hcnCloseLoadBalancer(loadBalancerHandle)
+	if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeLoadBalancer
+	var outputLoadBalancer HostComputeLoadBalancer
+	if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil {
+		return nil, err
+	}
+	return &outputLoadBalancer, nil
+}
+
+func deleteLoadBalancer(loadBalancerId string) error {
+	loadBalancerGuid, err := guid.FromString(loadBalancerId)
+	if err != nil {
+		return errInvalidLoadBalancerID
+	}
+	var resultBuffer *uint16
+	hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer)
+	if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListLoadBalancers makes a call to list all available loadBalancers.
+func ListLoadBalancers() ([]HostComputeLoadBalancer, error) {
+	hcnQuery := defaultQuery()
+	loadBalancers, err := ListLoadBalancersQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return loadBalancers, nil
+}
+
+// ListLoadBalancersQuery makes a call to query the list of available loadBalancers.
+func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) {
+	queryJson, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	loadBalancers, err := enumerateLoadBalancers(string(queryJson))
+	if err != nil {
+		return nil, err
+	}
+	return loadBalancers, nil
+}
+
+// GetLoadBalancerByID returns the LoadBalancer specified by Id.
+func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": loadBalancerId}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	loadBalancers, err := ListLoadBalancersQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(loadBalancers) == 0 {
+		return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId}
+	}
+	return &loadBalancers[0], err
+}
+
+// Create LoadBalancer.
+func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) {
+	logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id)
+
+	jsonString, err := json.Marshal(loadBalancer)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString)
+	loadBalancer, hcnErr := createLoadBalancer(string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return loadBalancer, nil
+}
+
+// Delete LoadBalancer.
+func (loadBalancer *HostComputeLoadBalancer) Delete() error {
+	logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id)
+
+	if err := deleteLoadBalancer(loadBalancer.Id); err != nil {
+		return err
+	}
+	return nil
+}
+
+// AddEndpoint adds an endpoint to a LoadBalancer
+func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) {
+	logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id)
+
+	err := loadBalancer.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Add Endpoint to the Existing List
+	loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id)
+
+	return loadBalancer.Create()
+}
+
+// RemoveEndpoint removes an endpoint from a LoadBalancer
+func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) {
+	logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id)
+
+	err := loadBalancer.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a list of all the endpoints besides the one being removed
+	var endpoints []string
+	for _, endpointReference := range loadBalancer.HostComputeEndpoints {
+		if endpointReference == endpoint.Id {
+			continue
+		}
+		endpoints = append(endpoints, endpointReference)
+	}
+	loadBalancer.HostComputeEndpoints = endpoints
+	return loadBalancer.Create()
+}
+
+// AddLoadBalancer creates a load balancer for the specified endpoints
+func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) {
+	logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort)
+
+	loadBalancer := &HostComputeLoadBalancer{
+		SourceVIP: sourceVIP,
+		PortMappings: []LoadBalancerPortMapping{
+			{
+				Protocol:     uint32(protocol),
+				InternalPort: internalPort,
+				ExternalPort: externalPort,
+				Flags:        portMappingFlags,
+			},
+		},
+		FrontendVIPs: frontendVIPs,
+		SchemaVersion: SchemaVersion{
+			Major: 2,
+			Minor: 0,
+		},
+		Flags: flags,
+	}
+
+	for _, endpoint := range endpoints {
+		loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id)
+	}
+
+	return loadBalancer.Create()
+}
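
AddLoadBalancer is the usual convenience entry point; a hedged usage sketch in which the endpoint name, VIPs, and ports are placeholders (protocol number 6 is TCP):

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	endpoint, err := hcn.GetEndpointByName("example-endpoint") // placeholder
	if err != nil {
		log.Fatal(err)
	}

	// TCP load balancer mapping external port 8080 to internal port 80.
	lb, err := hcn.AddLoadBalancer(
		[]hcn.HostComputeEndpoint{*endpoint},
		hcn.LoadBalancerFlagsNone,
		hcn.LoadBalancerPortMappingFlagsNone,
		"10.0.0.1",             // source VIP (placeholder)
		[]string{"10.0.0.100"}, // frontend VIPs (placeholder)
		6,                      // protocol: TCP
		80,                     // internal port
		8080,                   // external port
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created load balancer %s", lb.Id)
}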

+ 446 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go

@@ -0,0 +1,446 @@
+package hcn
+
+import (
+	"encoding/json"
+	"os"
+	"syscall"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	icni "github.com/Microsoft/hcsshim/internal/cni"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/regstate"
+	"github.com/Microsoft/hcsshim/internal/runhcs"
+	"github.com/sirupsen/logrus"
+)
+
+// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace.
+type NamespaceResourceEndpoint struct {
+	Id string `json:"ID,"`
+}
+
+// NamespaceResourceContainer represents a Container attached to a Namespace.
+type NamespaceResourceContainer struct {
+	Id string `json:"ID,"`
+}
+
+// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint.
+type NamespaceResourceType string
+
+var (
+	// NamespaceResourceTypeContainer are containers associated with a Namespace.
+	NamespaceResourceTypeContainer NamespaceResourceType = "Container"
+	// NamespaceResourceTypeEndpoint are endpoints associated with a Namespace.
+	NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint"
+)
+
+// NamespaceResource is associated with a namespace
+type NamespaceResource struct {
+	Type NamespaceResourceType `json:","` // Container, Endpoint
+	Data json.RawMessage       `json:","`
+}
+
+// NamespaceType determines whether the Namespace is for a Host or Guest
+type NamespaceType string
+
+var (
+	// NamespaceTypeHost are host namespaces.
+	NamespaceTypeHost NamespaceType = "Host"
+	// NamespaceTypeHostDefault are host namespaces in the default compartment.
+	NamespaceTypeHostDefault NamespaceType = "HostDefault"
+	// NamespaceTypeGuest are guest namespaces.
+	NamespaceTypeGuest NamespaceType = "Guest"
+	// NamespaceTypeGuestDefault are guest namespaces in the default compartment.
+	NamespaceTypeGuestDefault NamespaceType = "GuestDefault"
+)
+
+// HostComputeNamespace represents a namespace (AKA compartment) in the HCN service.
+type HostComputeNamespace struct {
+	Id            string              `json:"ID,omitempty"`
+	NamespaceId   uint32              `json:",omitempty"`
+	Type          NamespaceType       `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault
+	Resources     []NamespaceResource `json:",omitempty"`
+	SchemaVersion SchemaVersion       `json:",omitempty"`
+}
+
+// ModifyNamespaceSettingRequest is the structure used to send a request to modify a namespace.
+// Used to Add/Remove endpoints and containers to/from a namespace.
+type ModifyNamespaceSettingRequest struct {
+	ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint
+	RequestType  RequestType           `json:",omitempty"` // Add, Remove, Update, Refresh
+	Settings     json.RawMessage       `json:",omitempty"`
+}
+
+func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) {
+	// Open namespace.
+	var (
+		namespaceHandle  hcnNamespace
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query namespace.
+	hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close namespace.
+	hr = hcnCloseNamespace(namespaceHandle)
+	if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNamespace
+	var outputNamespace HostComputeNamespace
+	if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil {
+		return nil, err
+	}
+	return &outputNamespace, nil
+}
+
+func enumerateNamespaces(query string) ([]HostComputeNamespace, error) {
+	// Enumerate all Namespace Guids
+	var (
+		resultBuffer    *uint16
+		namespaceBuffer *uint16
+	)
+	hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer)
+	if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+
+	namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer)
+	var namespaceIds []guid.GUID
+	if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil {
+		return nil, err
+	}
+
+	var outputNamespaces []HostComputeNamespace
+	for _, namespaceGuid := range namespaceIds {
+		namespace, err := getNamespace(namespaceGuid, query)
+		if err != nil {
+			return nil, err
+		}
+		outputNamespaces = append(outputNamespaces, *namespace)
+	}
+	return outputNamespaces, nil
+}
+
+func createNamespace(settings string) (*HostComputeNamespace, error) {
+	// Create new namespace.
+	var (
+		namespaceHandle  hcnNamespace
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	namespaceGuid := guid.GUID{}
+	hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer)
+	if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query namespace.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close namespace.
+	hr = hcnCloseNamespace(namespaceHandle)
+	if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNamespace
+	var outputNamespace HostComputeNamespace
+	if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil {
+		return nil, err
+	}
+	return &outputNamespace, nil
+}
+
+func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) {
+	namespaceGuid, err := guid.FromString(namespaceId)
+	if err != nil {
+		return nil, errInvalidNamespaceID
+	}
+	// Open namespace.
+	var (
+		namespaceHandle  hcnNamespace
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Modify namespace.
+	hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer)
+	if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query namespace.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close namespace.
+	hr = hcnCloseNamespace(namespaceHandle)
+	if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to Namespace
+	var outputNamespace HostComputeNamespace
+	if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil {
+		return nil, err
+	}
+	return &outputNamespace, nil
+}
+
+func deleteNamespace(namespaceId string) error {
+	namespaceGuid, err := guid.FromString(namespaceId)
+	if err != nil {
+		return errInvalidNamespaceID
+	}
+	var resultBuffer *uint16
+	hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer)
+	if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListNamespaces makes a call to list all available namespaces.
+func ListNamespaces() ([]HostComputeNamespace, error) {
+	hcnQuery := defaultQuery()
+	namespaces, err := ListNamespacesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return namespaces, nil
+}
+
+// ListNamespacesQuery makes a call to query the list of available namespaces.
+func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) {
+	queryJson, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	namespaces, err := enumerateNamespaces(string(queryJson))
+	if err != nil {
+		return nil, err
+	}
+	return namespaces, nil
+}
+
+// GetNamespaceByID returns the Namespace specified by Id.
+func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": namespaceId}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	namespaces, err := ListNamespacesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(namespaces) == 0 {
+		return nil, NamespaceNotFoundError{NamespaceID: namespaceId}
+	}
+
+	return &namespaces[0], err
+}
+
+// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id.
+func GetNamespaceEndpointIds(namespaceId string) ([]string, error) {
+	namespace, err := GetNamespaceByID(namespaceId)
+	if err != nil {
+		return nil, err
+	}
+	var endpointsIds []string
+	for _, resource := range namespace.Resources {
+		if resource.Type == "Endpoint" {
+			var endpointResource NamespaceResourceEndpoint
+			if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil {
+				return nil, err
+			}
+			endpointsIds = append(endpointsIds, endpointResource.Id)
+		}
+	}
+	return endpointsIds, nil
+}
+
+// GetNamespaceContainerIds returns the containers of the Namespace specified by Id.
+func GetNamespaceContainerIds(namespaceId string) ([]string, error) {
+	namespace, err := GetNamespaceByID(namespaceId)
+	if err != nil {
+		return nil, err
+	}
+	var containerIds []string
+	for _, resource := range namespace.Resources {
+		if resource.Type == "Container" {
+			var containerResource NamespaceResourceContainer
+			if err := json.Unmarshal([]byte(resource.Data), &containerResource); err != nil {
+				return nil, err
+			}
+			containerIds = append(containerIds, containerResource.Id)
+		}
+	}
+	return containerIds, nil
+}
+
+// NewNamespace creates a new Namespace object
+func NewNamespace(nsType NamespaceType) *HostComputeNamespace {
+	return &HostComputeNamespace{
+		Type:          nsType,
+		SchemaVersion: V2SchemaVersion(),
+	}
+}
+
+// Create Namespace.
+func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) {
+	logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id)
+
+	jsonString, err := json.Marshal(namespace)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString)
+	namespace, hcnErr := createNamespace(string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return namespace, nil
+}
+
+// Delete Namespace.
+func (namespace *HostComputeNamespace) Delete() error {
+	logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id)
+
+	if err := deleteNamespace(namespace.Id); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Sync Namespace endpoints with the appropriate sandbox container holding the
+// network namespace open. If no sandbox container is found for this namespace
+// this method is treated as a success and will not return an error in this
+// case. If the sandbox container is found and a sync is initiated, any
+// failures will be returned via this method.
+//
+// This call initiates a sync between endpoints and the matching UtilityVM
+// hosting those endpoints. It is safe to call for any `NamespaceType` but
+// `NamespaceTypeGuest` is the only case when a sync will actually occur. For
+// `NamespaceTypeHost` the process container will be automatically synchronized
+// when the endpoint is added via `AddNamespaceEndpoint`.
+//
+// Note: This method syncs both additions and removals of endpoints from a
+// `NamespaceTypeGuest` namespace.
+func (namespace *HostComputeNamespace) Sync() error {
+	logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync")
+
+	// We only attempt a sync for namespace guest.
+	if namespace.Type != NamespaceTypeGuest {
+		return nil
+	}
+
+	// Look in the registry for the key to map from namespace id to pod-id
+	cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id)
+	if err != nil {
+		if regstate.IsNotFoundError(err) {
+			return nil
+		}
+		return err
+	}
+	req := runhcs.VMRequest{
+		ID: cfg.ContainerID,
+		Op: runhcs.OpSyncNamespace,
+	}
+	shimPath := runhcs.VMPipePath(cfg.HostUniqueID)
+	if err := runhcs.IssueVMRequest(shimPath, &req); err != nil {
+		// The shim is likely gone. Simply ignore the sync as if it didn't exist.
+		if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
+			// Remove the reg key; there is no point in trying again.
+			_ = cfg.Remove()
+			return nil
+		}
+		f := map[string]interface{}{
+			"id":           namespace.Id,
+			"container-id": cfg.ContainerID,
+		}
+		logrus.WithFields(f).
+			WithError(err).
+			Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath)
+		return err
+	}
+	return nil
+}
+
+// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace.
+func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error {
+	logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId)
+
+	namespaceSettings, err := json.Marshal(request)
+	if err != nil {
+		return err
+	}
+
+	_, err = modifyNamespace(namespaceId, string(namespaceSettings))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// AddNamespaceEndpoint adds an endpoint to a Namespace.
+func AddNamespaceEndpoint(namespaceId string, endpointId string) error {
+	logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId)
+
+	mapA := map[string]string{"EndpointId": endpointId}
+	settingsJson, err := json.Marshal(mapA)
+	if err != nil {
+		return err
+	}
+	requestMessage := &ModifyNamespaceSettingRequest{
+		ResourceType: NamespaceResourceTypeEndpoint,
+		RequestType:  RequestTypeAdd,
+		Settings:     settingsJson,
+	}
+
+	return ModifyNamespaceSettings(namespaceId, requestMessage)
+}
+
+// RemoveNamespaceEndpoint removes an endpoint from a Namespace.
+func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error {
+	logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId)
+
+	mapA := map[string]string{"EndpointId": endpointId}
+	settingsJson, err := json.Marshal(mapA)
+	if err != nil {
+		return err
+	}
+	requestMessage := &ModifyNamespaceSettingRequest{
+		ResourceType: NamespaceResourceTypeEndpoint,
+		RequestType:  RequestTypeRemove,
+		Settings:     settingsJson,
+	}
+
+	return ModifyNamespaceSettings(namespaceId, requestMessage)
+}
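
A short sketch of the namespace lifecycle built from the helpers above (illustrative only; the endpoint GUID is a placeholder):

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	// Create a host-type namespace (compartment).
	namespace, err := hcn.NewNamespace(hcn.NamespaceTypeHost).Create()
	if err != nil {
		log.Fatal(err)
	}

	// Attach an existing endpoint to it (placeholder GUID).
	endpointID := "00000000-0000-0000-0000-000000000000"
	if err := hcn.AddNamespaceEndpoint(namespace.Id, endpointID); err != nil {
		log.Fatal(err)
	}

	// Read back the endpoints the service now reports for the namespace.
	ids, err := hcn.GetNamespaceEndpointIds(namespace.Id)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("namespace %s endpoints: %v", namespace.Id, ids)
}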

+ 462 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go

@@ -0,0 +1,462 @@
+package hcn
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// Route is associated with a subnet.
+type Route struct {
+	NextHop           string `json:",omitempty"`
+	DestinationPrefix string `json:",omitempty"`
+	Metric            uint16 `json:",omitempty"`
+}
+
+// Subnet is associated with an Ipam.
+type Subnet struct {
+	IpAddressPrefix string            `json:",omitempty"`
+	Policies        []json.RawMessage `json:",omitempty"`
+	Routes          []Route           `json:",omitempty"`
+}
+
+// Ipam (Internet Protocol Address Management) is associated with a network
+// and represents the address space(s) of a network.
+type Ipam struct {
+	Type    string   `json:",omitempty"` // Ex: Static, DHCP
+	Subnets []Subnet `json:",omitempty"`
+}
+
+// MacRange is associated with MacPool and represents the start and end addresses.
+type MacRange struct {
+	StartMacAddress string `json:",omitempty"`
+	EndMacAddress   string `json:",omitempty"`
+}
+
+// MacPool is associated with a network and represents a pool of MacRanges.
+type MacPool struct {
+	Ranges []MacRange `json:",omitempty"`
+}
+
+// Dns (Domain Name System) is associated with a network.
+type Dns struct {
+	Domain     string   `json:",omitempty"`
+	Search     []string `json:",omitempty"`
+	ServerList []string `json:",omitempty"`
+	Options    []string `json:",omitempty"`
+}
+
+// NetworkType are various networks.
+type NetworkType string
+
+// NetworkType const
+const (
+	NAT         NetworkType = "NAT"
+	Transparent NetworkType = "Transparent"
+	L2Bridge    NetworkType = "L2Bridge"
+	L2Tunnel    NetworkType = "L2Tunnel"
+	ICS         NetworkType = "ICS"
+	Private     NetworkType = "Private"
+	Overlay     NetworkType = "Overlay"
+)
+
+// NetworkFlags are various network flags.
+type NetworkFlags uint32
+
+// NetworkFlags const
+const (
+	None                NetworkFlags = 0
+	EnableNonPersistent NetworkFlags = 8
+)
+
+// HostComputeNetwork represents a network
+type HostComputeNetwork struct {
+	Id            string          `json:"ID,omitempty"`
+	Name          string          `json:",omitempty"`
+	Type          NetworkType     `json:",omitempty"`
+	Policies      []NetworkPolicy `json:",omitempty"`
+	MacPool       MacPool         `json:",omitempty"`
+	Dns           Dns             `json:",omitempty"`
+	Ipams         []Ipam          `json:",omitempty"`
+	Flags         NetworkFlags    `json:",omitempty"` // 0: None
+	Health        Health          `json:",omitempty"`
+	SchemaVersion SchemaVersion   `json:",omitempty"`
+}
+
+// NetworkResourceType are the 3 different Network settings resources.
+type NetworkResourceType string
+
+var (
+	// NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet
+	NetworkResourceTypePolicy NetworkResourceType = "Policy"
+	// NetworkResourceTypeDNS is for Network's DNS settings.
+	NetworkResourceTypeDNS NetworkResourceType = "DNS"
+	// NetworkResourceTypeExtension is for Network's extension settings.
+	NetworkResourceTypeExtension NetworkResourceType = "Extension"
+)
+
+// ModifyNetworkSettingRequest is the structure used to send a request to modify a network.
+// Used to update DNS/extension/policy on a network.
+type ModifyNetworkSettingRequest struct {
+	ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension
+	RequestType  RequestType         `json:",omitempty"` // Add, Remove, Update, Refresh
+	Settings     json.RawMessage     `json:",omitempty"`
+}
+
+type PolicyNetworkRequest struct {
+	Policies []NetworkPolicy `json:",omitempty"`
+}
+
+func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) {
+	// Open network.
+	var (
+		networkHandle    hcnNetwork
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query network.
+	hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close network.
+	hr = hcnCloseNetwork(networkHandle)
+	if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNetwork
+	var outputNetwork HostComputeNetwork
+
+	// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
+	// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
+	// unmarshaling the JSON blob.
+	outputNetwork.Type = NAT
+
+	if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
+		return nil, err
+	}
+	return &outputNetwork, nil
+}
+
+func enumerateNetworks(query string) ([]HostComputeNetwork, error) {
+	// Enumerate all Network Guids
+	var (
+		resultBuffer  *uint16
+		networkBuffer *uint16
+	)
+	hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer)
+	if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+
+	networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer)
+	var networkIds []guid.GUID
+	if err := json.Unmarshal([]byte(networks), &networkIds); err != nil {
+		return nil, err
+	}
+
+	var outputNetworks []HostComputeNetwork
+	for _, networkGuid := range networkIds {
+		network, err := getNetwork(networkGuid, query)
+		if err != nil {
+			return nil, err
+		}
+		outputNetworks = append(outputNetworks, *network)
+	}
+	return outputNetworks, nil
+}
+
+func createNetwork(settings string) (*HostComputeNetwork, error) {
+	// Create new network.
+	var (
+		networkHandle    hcnNetwork
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	networkGuid := guid.GUID{}
+	hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer)
+	if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query network.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close network.
+	hr = hcnCloseNetwork(networkHandle)
+	if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNetwork
+	var outputNetwork HostComputeNetwork
+
+	// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
+	// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
+	// unmarshaling the JSON blob.
+	outputNetwork.Type = NAT
+
+	if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
+		return nil, err
+	}
+	return &outputNetwork, nil
+}
+
+func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) {
+	networkGuid, err := guid.FromString(networkId)
+	if err != nil {
+		return nil, errInvalidNetworkID
+	}
+	// Open Network
+	var (
+		networkHandle    hcnNetwork
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Modify Network
+	hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer)
+	if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query network.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close network.
+	hr = hcnCloseNetwork(networkHandle)
+	if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNetwork
+	var outputNetwork HostComputeNetwork
+
+	// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
+	// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
+	// unmarshaling the JSON blob.
+	outputNetwork.Type = NAT
+
+	if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil {
+		return nil, err
+	}
+	return &outputNetwork, nil
+}
+
+func deleteNetwork(networkId string) error {
+	networkGuid, err := guid.FromString(networkId)
+	if err != nil {
+		return errInvalidNetworkID
+	}
+	var resultBuffer *uint16
+	hr := hcnDeleteNetwork(&networkGuid, &resultBuffer)
+	if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListNetworks makes a call to list all available networks.
+func ListNetworks() ([]HostComputeNetwork, error) {
+	hcnQuery := defaultQuery()
+	networks, err := ListNetworksQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return networks, nil
+}
+
+// ListNetworksQuery makes a call to query the list of available networks.
+func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) {
+	queryJson, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	networks, err := enumerateNetworks(string(queryJson))
+	if err != nil {
+		return nil, err
+	}
+	return networks, nil
+}
+
+// GetNetworkByID returns the network specified by Id.
+func GetNetworkByID(networkID string) (*HostComputeNetwork, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": networkID}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	networks, err := ListNetworksQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(networks) == 0 {
+		return nil, NetworkNotFoundError{NetworkID: networkID}
+	}
+	return &networks[0], err
+}
+
+// GetNetworkByName returns the network specified by Name.
+func GetNetworkByName(networkName string) (*HostComputeNetwork, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"Name": networkName}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	networks, err := ListNetworksQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(networks) == 0 {
+		return nil, NetworkNotFoundError{NetworkName: networkName}
+	}
+	return &networks[0], err
+}
+
+// Create Network.
+func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) {
+	logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id)
+	for _, ipam := range network.Ipams {
+		for _, subnet := range ipam.Subnets {
+			if subnet.IpAddressPrefix != "" {
+				hasDefault := false
+				for _, route := range subnet.Routes {
+					if route.NextHop == "" {
+						return nil, errors.New("network create error, subnet has address prefix but no gateway specified")
+					}
+					if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" {
+						hasDefault = true
+					}
+				}
+				if !hasDefault {
+					return nil, errors.New("network create error, no default gateway")
+				}
+			}
+		}
+	}
+
+	jsonString, err := json.Marshal(network)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString)
+	network, hcnErr := createNetwork(string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return network, nil
+}
+
+// Delete Network.
+func (network *HostComputeNetwork) Delete() error {
+	logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id)
+
+	if err := deleteNetwork(network.Id); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ModifyNetworkSettings updates the Policy for a network.
+func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error {
+	logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id)
+
+	networkSettingsRequest, err := json.Marshal(request)
+	if err != nil {
+		return err
+	}
+
+	_, err = modifyNetwork(network.Id, string(networkSettingsRequest))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network.
+func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error {
+	logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id)
+
+	settingsJson, err := json.Marshal(networkPolicy)
+	if err != nil {
+		return err
+	}
+	requestMessage := &ModifyNetworkSettingRequest{
+		ResourceType: NetworkResourceTypePolicy,
+		RequestType:  RequestTypeAdd,
+		Settings:     settingsJson,
+	}
+
+	return network.ModifyNetworkSettings(requestMessage)
+}
+
+// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network.
+func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error {
+	logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id)
+
+	settingsJson, err := json.Marshal(networkPolicy)
+	if err != nil {
+		return err
+	}
+	requestMessage := &ModifyNetworkSettingRequest{
+		ResourceType: NetworkResourceTypePolicy,
+		RequestType:  RequestTypeRemove,
+		Settings:     settingsJson,
+	}
+
+	return network.ModifyNetworkSettings(requestMessage)
+}
+
+// CreateEndpoint creates an endpoint on the Network.
+func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) {
+	isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0
+	logrus.Debugf("hcn::HostComputeNetwork::CreateEndpoint, networkId=%s remote=%t", network.Id, isRemote)
+
+	endpoint.HostComputeNetwork = network.Id
+	endpointSettings, err := json.Marshal(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	newEndpoint, err := createEndpoint(network.Id, string(endpointSettings))
+	if err != nil {
+		return nil, err
+	}
+	return newEndpoint, nil
+}
+
+// CreateRemoteEndpoint creates a remote endpoint on the Network.
+func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) {
+	endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags
+	return network.CreateEndpoint(endpoint)
+}
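
Create() rejects a subnet that carries an address prefix without a gateway and a default route; a minimal NAT network sketch that satisfies that check (illustrative only, addresses are placeholders):

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/hcn"
)

func main() {
	network := &hcn.HostComputeNetwork{
		Name: "example-nat",
		Type: hcn.NAT,
		Ipams: []hcn.Ipam{{
			Type: "Static",
			Subnets: []hcn.Subnet{{
				IpAddressPrefix: "192.168.100.0/24",
				Routes: []hcn.Route{{
					NextHop:           "192.168.100.1", // gateway, required with an address prefix
					DestinationPrefix: "0.0.0.0/0",     // default route, required with an address prefix
				}},
			}},
		}},
		SchemaVersion: hcn.SchemaVersion{Major: 2, Minor: 0},
	}

	created, err := network.Create()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created network %s (type %s)", created.Id, created.Type)
}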

+ 344 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go

@@ -0,0 +1,344 @@
+package hcn
+
+import (
+	"encoding/json"
+)
+
+// EndpointPolicyType are the potential Policies that apply to Endpoints.
+type EndpointPolicyType string
+
+// EndpointPolicyType const
+const (
+	PortMapping   EndpointPolicyType = "PortMapping"
+	ACL           EndpointPolicyType = "ACL"
+	QOS           EndpointPolicyType = "QOS"
+	L2Driver      EndpointPolicyType = "L2Driver"
+	OutBoundNAT   EndpointPolicyType = "OutBoundNAT"
+	SDNRoute      EndpointPolicyType = "SDNRoute"
+	L4Proxy       EndpointPolicyType = "L4Proxy"
+	L4WFPPROXY    EndpointPolicyType = "L4WFPPROXY"
+	PortName      EndpointPolicyType = "PortName"
+	EncapOverhead EndpointPolicyType = "EncapOverhead"
+	IOV           EndpointPolicyType = "Iov"
+	// Endpoint and Network have InterfaceConstraint and ProviderAddress
+	NetworkProviderAddress     EndpointPolicyType = "ProviderAddress"
+	NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint"
+	TierAcl       EndpointPolicyType = "TierAcl"
+)
+
+// EndpointPolicy is a collection of Policy settings for an Endpoint.
+type EndpointPolicy struct {
+	Type     EndpointPolicyType `json:""`
+	Settings json.RawMessage    `json:",omitempty"`
+}
+
+// NetworkPolicyType are the potential Policies that apply to Networks.
+type NetworkPolicyType string
+
+// NetworkPolicyType const
+const (
+	SourceMacAddress    NetworkPolicyType = "SourceMacAddress"
+	NetAdapterName      NetworkPolicyType = "NetAdapterName"
+	VSwitchExtension    NetworkPolicyType = "VSwitchExtension"
+	DrMacAddress        NetworkPolicyType = "DrMacAddress"
+	AutomaticDNS        NetworkPolicyType = "AutomaticDNS"
+	InterfaceConstraint NetworkPolicyType = "InterfaceConstraint"
+	ProviderAddress     NetworkPolicyType = "ProviderAddress"
+	RemoteSubnetRoute   NetworkPolicyType = "RemoteSubnetRoute"
+	VxlanPort           NetworkPolicyType = "VxlanPort"
+	HostRoute           NetworkPolicyType = "HostRoute"
+	SetPolicy           NetworkPolicyType = "SetPolicy"
+	NetworkL4Proxy      NetworkPolicyType = "L4Proxy"
+	LayerConstraint     NetworkPolicyType = "LayerConstraint"
+	NetworkACL          NetworkPolicyType = "NetworkACL"
+)
+
+// NetworkPolicy is a collection of Policy settings for a Network.
+type NetworkPolicy struct {
+	Type     NetworkPolicyType `json:""`
+	Settings json.RawMessage   `json:",omitempty"`
+}
+
+// SubnetPolicyType are the potential Policies that apply to Subnets.
+type SubnetPolicyType string
+
+// SubnetPolicyType const
+const (
+	VLAN SubnetPolicyType = "VLAN"
+	VSID SubnetPolicyType = "VSID"
+)
+
+// SubnetPolicy is a collection of Policy settings for a Subnet.
+type SubnetPolicy struct {
+	Type     SubnetPolicyType `json:""`
+	Settings json.RawMessage  `json:",omitempty"`
+}
+
+// NatFlags are flags for portmappings.
+type NatFlags uint32
+
+const (
+	NatFlagsNone NatFlags = iota
+	NatFlagsLocalRoutedVip
+	NatFlagsIPv6
+)
+
+/// Endpoint Policy objects
+
+// PortMappingPolicySetting defines Port Mapping (NAT)
+type PortMappingPolicySetting struct {
+	Protocol     uint32   `json:",omitempty"` // EX: TCP = 6, UDP = 17
+	InternalPort uint16   `json:",omitempty"`
+	ExternalPort uint16   `json:",omitempty"`
+	VIP          string   `json:",omitempty"`
+	Flags        NatFlags `json:",omitempty"`
+}
+
+// ActionType associated with ACLs. Value is Allow, Block, or Pass.
+type ActionType string
+
+// DirectionType associated with ACLs. Value is either In or Out.
+type DirectionType string
+
+// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP).
+type RuleType string
+
+const (
+	// Allow traffic
+	ActionTypeAllow ActionType = "Allow"
+	// Block traffic
+	ActionTypeBlock ActionType = "Block"
+	// Pass traffic
+	ActionTypePass ActionType = "Pass"
+
+	// In is traffic coming to the Endpoint
+	DirectionTypeIn DirectionType = "In"
+	// Out is traffic leaving the Endpoint
+	DirectionTypeOut DirectionType = "Out"
+
+	// Host creates WFP (Windows Firewall) rules
+	RuleTypeHost RuleType = "Host"
+	// Switch creates VFP (Virtual Filter Platform) rules
+	RuleTypeSwitch RuleType = "Switch"
+)
+
+// AclPolicySetting creates firewall rules on an endpoint
+type AclPolicySetting struct {
+	Protocols       string        `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP)
+	Action          ActionType    `json:","`
+	Direction       DirectionType `json:","`
+	LocalAddresses  string        `json:",omitempty"`
+	RemoteAddresses string        `json:",omitempty"`
+	LocalPorts      string        `json:",omitempty"`
+	RemotePorts     string        `json:",omitempty"`
+	RuleType        RuleType      `json:",omitempty"`
+	Priority        uint16        `json:",omitempty"`
+}
+
+// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint.
+type QosPolicySetting struct {
+	MaximumOutgoingBandwidthInBytes uint64
+}
+
+// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint.
+type OutboundNatPolicySetting struct {
+	VirtualIP    string   `json:",omitempty"`
+	Exceptions   []string `json:",omitempty"`
+	Destinations []string `json:",omitempty"`
+	Flags        NatFlags `json:",omitempty"`
+}
+
+// SDNRoutePolicySetting sets SDN Route on an Endpoint.
+type SDNRoutePolicySetting struct {
+	DestinationPrefix string `json:",omitempty"`
+	NextHop           string `json:",omitempty"`
+	NeedEncap         bool   `json:",omitempty"`
+}
+
+// NetworkACLPolicySetting creates ACL rules on a network
+type NetworkACLPolicySetting struct {
+	Protocols       string        `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP)
+	Action          ActionType    `json:","`
+	Direction       DirectionType `json:","`
+	LocalAddresses  string        `json:",omitempty"`
+	RemoteAddresses string        `json:",omitempty"`
+	LocalPorts      string        `json:",omitempty"`
+	RemotePorts     string        `json:",omitempty"`
+	RuleType        RuleType      `json:",omitempty"`
+	Priority        uint16        `json:",omitempty"`
+}
+
+// FiveTuple is nested in L4WfpProxyPolicySetting for WFP support.
+type FiveTuple struct {
+	Protocols       string `json:",omitempty"`
+	LocalAddresses  string `json:",omitempty"`
+	RemoteAddresses string `json:",omitempty"`
+	LocalPorts      string `json:",omitempty"`
+	RemotePorts     string `json:",omitempty"`
+	Priority        uint16 `json:",omitempty"`
+}
+
+// ProxyExceptions exempts traffic to IpAddresses and Ports
+type ProxyExceptions struct {
+	IpAddressExceptions []string `json:",omitempty"`
+	PortExceptions      []string `json:",omitempty"`
+}
+
+// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint.
+type L4WfpProxyPolicySetting struct {
+	InboundProxyPort   string          `json:",omitempty"`
+	OutboundProxyPort  string          `json:",omitempty"`
+	FilterTuple        FiveTuple       `json:",omitempty"`
+	UserSID            string          `json:",omitempty"`
+	InboundExceptions  ProxyExceptions `json:",omitempty"`
+	OutboundExceptions ProxyExceptions `json:",omitempty"`
+}
+
+// PortnameEndpointPolicySetting sets the port name for an endpoint.
+type PortnameEndpointPolicySetting struct {
+	Name string `json:",omitempty"`
+}
+
+// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint.
+type EncapOverheadEndpointPolicySetting struct {
+	Overhead uint16 `json:",omitempty"`
+}
+
+// IovPolicySetting sets the Iov settings for an endpoint.
+type IovPolicySetting struct {
+	IovOffloadWeight    uint32 `json:",omitempty"`
+	QueuePairsRequested uint32 `json:",omitempty"`
+	InterruptModeration uint32 `json:",omitempty"`
+}
+
+/// Endpoint and Network Policy objects
+
+// ProviderAddressEndpointPolicySetting sets the PA for an endpoint.
+type ProviderAddressEndpointPolicySetting struct {
+	ProviderAddress string `json:",omitempty"`
+}
+
+// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic.
+type InterfaceConstraintPolicySetting struct {
+	InterfaceGuid        string `json:",omitempty"`
+	InterfaceLuid        uint64 `json:",omitempty"`
+	InterfaceIndex       uint32 `json:",omitempty"`
+	InterfaceMediaType   uint32 `json:",omitempty"`
+	InterfaceAlias       string `json:",omitempty"`
+	InterfaceDescription string `json:",omitempty"`
+}
+
+/// Network Policy objects
+
+// SourceMacAddressNetworkPolicySetting sets source MAC for a network.
+type SourceMacAddressNetworkPolicySetting struct {
+	SourceMacAddress string `json:",omitempty"`
+}
+
+// NetAdapterNameNetworkPolicySetting sets network adapter of a network.
+type NetAdapterNameNetworkPolicySetting struct {
+	NetworkAdapterName string `json:",omitempty"`
+}
+
+// VSwitchExtensionNetworkPolicySetting enables/disabled VSwitch extensions for a network.
+type VSwitchExtensionNetworkPolicySetting struct {
+	ExtensionID string `json:",omitempty"`
+	Enable      bool   `json:",omitempty"`
+}
+
+// DrMacAddressNetworkPolicySetting sets the DR MAC for a network.
+type DrMacAddressNetworkPolicySetting struct {
+	Address string `json:",omitempty"`
+}
+
+// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network.
+type AutomaticDNSNetworkPolicySetting struct {
+	Enable bool `json:",omitempty"`
+}
+
+type LayerConstraintNetworkPolicySetting struct {
+	LayerId string `json:",omitempty"`
+}
+
+/// Subnet Policy objects
+
+// VlanPolicySetting isolates a subnet with VLAN tagging.
+type VlanPolicySetting struct {
+	IsolationId uint32 `json:","`
+}
+
+// VsidPolicySetting isolates a subnet with VSID tagging.
+type VsidPolicySetting struct {
+	IsolationId uint32 `json:","`
+}
+
+// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network
+type RemoteSubnetRoutePolicySetting struct {
+	DestinationPrefix           string
+	IsolationId                 uint16
+	ProviderAddress             string
+	DistributedRouterMacAddress string
+}
+
+// SetPolicyType associated with SetPolicy. Value is IPSET or NESTEDIPSET.
+type SetPolicyType string
+
+const (
+	SetPolicyTypeIpSet       SetPolicyType = "IPSET"
+	SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET"
+)
+
+// SetPolicySetting creates IPSets on network
+type SetPolicySetting struct {
+	Id     string
+	Name   string
+	Type   SetPolicyType
+	Values string
+}
+
+// VxlanPortPolicySetting allows configuring the VXLAN TCP port
+type VxlanPortPolicySetting struct {
+	Port uint16
+}
+
+// ProtocolType associated with L4ProxyPolicy
+type ProtocolType uint32
+
+const (
+	ProtocolTypeUnknown ProtocolType = 0
+	ProtocolTypeICMPv4  ProtocolType = 1
+	ProtocolTypeIGMP    ProtocolType = 2
+	ProtocolTypeTCP     ProtocolType = 6
+	ProtocolTypeUDP     ProtocolType = 17
+	ProtocolTypeICMPv6  ProtocolType = 58
+)
+
+// L4ProxyPolicySetting applies proxy policy on network/endpoint
+type L4ProxyPolicySetting struct {
+	IP          string       `json:",omitempty"`
+	Port        string       `json:",omitempty"`
+	Protocol    ProtocolType `json:",omitempty"`
+	Exceptions  []string     `json:",omitempty"`
+	Destination string
+	OutboundNAT bool         `json:",omitempty"`
+}
+
+// TierAclRule represents an ACL within TierAclPolicySetting
+type TierAclRule struct {
+	Id                string     `json:",omitempty"`
+	Protocols         string     `json:",omitempty"`
+	TierAclRuleAction ActionType `json:","`
+	LocalAddresses    string     `json:",omitempty"`
+	RemoteAddresses   string     `json:",omitempty"`
+	LocalPorts        string     `json:",omitempty"`
+	RemotePorts       string     `json:",omitempty"`
+	Priority          uint16     `json:",omitempty"`
+}
+
+// TierAclPolicySetting represents a Tier containing ACLs
+type TierAclPolicySetting struct {
+	Name         string        `json:","`
+	Direction    DirectionType `json:","`
+	Order        uint16        `json:""`
+	TierAclRules []TierAclRule `json:",omitempty"`
+}
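
A small, self-contained sketch of the composition pattern these types imply: a concrete policy setting is JSON-marshalled and carried as the raw Settings payload of an EndpointPolicy. Only identifiers from the file above are used; the package name is illustrative.

package hcnexample

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// portMappingPolicy builds a PortMapping endpoint policy that forwards host
// port 8080 to container port 80 over TCP (protocol number 6).
func portMappingPolicy() (hcn.EndpointPolicy, error) {
	raw, err := json.Marshal(hcn.PortMappingPolicySetting{
		Protocol:     6,
		InternalPort: 80,
		ExternalPort: 8080,
	})
	if err != nil {
		return hcn.EndpointPolicy{}, err
	}
	return hcn.EndpointPolicy{
		Type:     hcn.PortMapping, // EndpointPolicyType constant from above
		Settings: raw,             // json.RawMessage payload consumed by HNS
	}, nil
}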

+ 266 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go

@@ -0,0 +1,266 @@
+package hcn
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// HostComputeRoute represents SDN routes.
+type HostComputeRoute struct {
+	ID                   string                  `json:"ID,omitempty"`
+	HostComputeEndpoints []string                `json:",omitempty"`
+	Setting              []SDNRoutePolicySetting `json:",omitempty"`
+	SchemaVersion        SchemaVersion           `json:",omitempty"`
+}
+
+// ListRoutes makes a call to list all available routes.
+func ListRoutes() ([]HostComputeRoute, error) {
+	hcnQuery := defaultQuery()
+	routes, err := ListRoutesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return routes, nil
+}
+
+// ListRoutesQuery makes a call to query the list of available routes.
+func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) {
+	queryJSON, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	routes, err := enumerateRoutes(string(queryJSON))
+	if err != nil {
+		return nil, err
+	}
+	return routes, nil
+}
+
+// GetRouteByID returns the route specified by Id.
+func GetRouteByID(routeID string) (*HostComputeRoute, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": routeID}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	routes, err := ListRoutesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(routes) == 0 {
+		return nil, RouteNotFoundError{RouteId: routeID}
+	}
+	return &routes[0], err
+}
+
+// Create Route.
+func (route *HostComputeRoute) Create() (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID)
+
+	jsonString, err := json.Marshal(route)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString)
+	route, hcnErr := createRoute(string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return route, nil
+}
+
+// Delete Route.
+func (route *HostComputeRoute) Delete() error {
+	logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID)
+
+	existingRoute, _ := GetRouteByID(route.ID)
+
+	if existingRoute != nil {
+		if err := deleteRoute(route.ID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// AddEndpoint adds an endpoint to a route.
+// Since HCNRoute doesn't implement modify functionality, the add operation is essentially a delete followed by a re-create.
+func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)
+
+	err := route.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Add Endpoint to the Existing List
+	route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id)
+
+	return route.Create()
+}
+
+// RemoveEndpoint removes an endpoint from a route
+// Since HCNRoute doesn't implement modify functionality, the remove operation is essentially a delete followed by a re-create.
+func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)
+
+	err := route.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a list of all the endpoints besides the one being removed
+	i := 0
+	for index, endpointReference := range route.HostComputeEndpoints {
+		if endpointReference == endpoint.Id {
+			i = index
+			break
+		}
+	}
+
+	route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...)
+	return route.Create()
+}
+
+// AddRoute for the specified endpoints and SDN Route setting
+func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation)
+
+	if len(endpoints) <= 0 {
+		return nil, errors.New("missing endpoints")
+	}
+
+	route := &HostComputeRoute{
+		SchemaVersion: V2SchemaVersion(),
+		Setting: []SDNRoutePolicySetting{
+			{
+				DestinationPrefix: destinationPrefix,
+				NextHop:           nextHop,
+				NeedEncap:         needEncapsulation,
+			},
+		},
+	}
+
+	for _, endpoint := range endpoints {
+		route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id)
+	}
+
+	return route.Create()
+}
+
+func enumerateRoutes(query string) ([]HostComputeRoute, error) {
+	// Enumerate all routes Guids
+	var (
+		resultBuffer *uint16
+		routeBuffer  *uint16
+	)
+	hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer)
+	if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+
+	routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer)
+	var routeIds []guid.GUID
+	if err := json.Unmarshal([]byte(routes), &routeIds); err != nil {
+		return nil, err
+	}
+
+	var outputRoutes []HostComputeRoute
+	for _, routeGUID := range routeIds {
+		route, err := getRoute(routeGUID, query)
+		if err != nil {
+			return nil, err
+		}
+		outputRoutes = append(outputRoutes, *route)
+	}
+	return outputRoutes, nil
+}
+
+func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) {
+	// Open routes.
+	var (
+		routeHandle      hcnRoute
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query routes.
+	hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close routes.
+	hr = hcnCloseRoute(routeHandle)
+	if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeRoute
+	var outputRoute HostComputeRoute
+	if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil {
+		return nil, err
+	}
+	return &outputRoute, nil
+}
+
+func createRoute(settings string) (*HostComputeRoute, error) {
+	// Create new route.
+	var (
+		routeHandle      hcnRoute
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	routeGUID := guid.GUID{}
+	hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer)
+	if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query route.
+	hcnQuery := defaultQuery()
+	query, err := json.Marshal(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close Route.
+	hr = hcnCloseRoute(routeHandle)
+	if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeRoute
+	var outputRoute HostComputeRoute
+	if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil {
+		return nil, err
+	}
+	return &outputRoute, nil
+}
+
+func deleteRoute(routeID string) error {
+	routeGUID, err := guid.FromString(routeID)
+	if err != nil {
+		return errInvalidRouteID
+	}
+	var resultBuffer *uint16
+	hr := hcnDeleteRoute(&routeGUID, &resultBuffer)
+	if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil {
+		return err
+	}
+	return nil
+}
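
A brief usage sketch for the route helpers above. Endpoints are assumed to have been created elsewhere (for example via HostComputeNetwork.CreateEndpoint), and the addresses are placeholders.

package hcnexample

import "github.com/Microsoft/hcsshim/hcn"

// addDefaultRoute attaches a 0.0.0.0/0 route via the given next hop to a set of
// existing endpoints, without encapsulation. Under the hood AddRoute builds a
// HostComputeRoute with a single SDNRoutePolicySetting and calls Create.
func addDefaultRoute(endpoints []hcn.HostComputeEndpoint, nextHop string) (*hcn.HostComputeRoute, error) {
	return hcn.AddRoute(endpoints, "0.0.0.0/0", nextHop, false)
}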

+ 147 - 0
vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go

@@ -0,0 +1,147 @@
+package hcn
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// featuresOnce handles assigning the supported features and logging the supported info only once,
+	// to avoid repeating that work every time a feature check is made.
+	featuresOnce      sync.Once
+	featuresErr       error
+	supportedFeatures SupportedFeatures
+)
+
+// SupportedFeatures are the features provided by the Service.
+type SupportedFeatures struct {
+	Acl                      AclFeatures `json:"ACL"`
+	Api                      ApiSupport  `json:"API"`
+	RemoteSubnet             bool        `json:"RemoteSubnet"`
+	HostRoute                bool        `json:"HostRoute"`
+	DSR                      bool        `json:"DSR"`
+	Slash32EndpointPrefixes  bool        `json:"Slash32EndpointPrefixes"`
+	AclSupportForProtocol252 bool        `json:"AclSupportForProtocol252"`
+	SessionAffinity          bool        `json:"SessionAffinity"`
+	IPv6DualStack            bool        `json:"IPv6DualStack"`
+	SetPolicy                bool        `json:"SetPolicy"`
+	VxlanPort                bool        `json:"VxlanPort"`
+	L4Proxy                  bool        `json:"L4Proxy"`    // network policy that applies VFP rules to all endpoints on the network to redirect traffic
+	L4WfpProxy               bool        `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint
+	TierAcl                  bool        `json:"TierAcl"`
+	NetworkACL               bool        `json:"NetworkACL"`
+	NestedIpSet              bool        `json:"NestedIpSet"`
+}
+
+// AclFeatures are the supported ACL possibilities.
+type AclFeatures struct {
+	AclAddressLists       bool `json:"AclAddressLists"`
+	AclNoHostRulePriority bool `json:"AclHostRulePriority"`
+	AclPortRanges         bool `json:"AclPortRanges"`
+	AclRuleId             bool `json:"AclRuleId"`
+}
+
+// ApiSupport lists the supported API versions.
+type ApiSupport struct {
+	V1 bool `json:"V1"`
+	V2 bool `json:"V2"`
+}
+
+// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called
+// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the
+// various hcn.IsXSupported methods need to be made.
+func GetCachedSupportedFeatures() (SupportedFeatures, error) {
+	// Only query the HCN version and features supported once, instead of every time this is invoked. The logs are useful to
+	// debug incidents where there's confusion about whether a feature is supported on the host machine. The sync.Once helps to avoid redundant
+	// spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kube-proxy
+	// for example.
+	featuresOnce.Do(func() {
+		supportedFeatures, featuresErr = getSupportedFeatures()
+	})
+
+	return supportedFeatures, featuresErr
+}
+
+// GetSupportedFeatures returns the features supported by the Service.
+//
+// Deprecated: Use GetCachedSupportedFeatures instead.
+func GetSupportedFeatures() SupportedFeatures {
+	features, err := GetCachedSupportedFeatures()
+	if err != nil {
+		// Expected on pre-1803 builds, all features will be false/unsupported
+		logrus.WithError(err).Errorf("unable to obtain supported features")
+		return features
+	}
+	return features
+}
+
+func getSupportedFeatures() (SupportedFeatures, error) {
+	var features SupportedFeatures
+	globals, err := GetGlobals()
+	if err != nil {
+		// It's expected that if this fails once, it will always fail; it fails on pre-1803 builds, for example.
+		return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.")
+	}
+	features.Acl = AclFeatures{
+		AclAddressLists:       isFeatureSupported(globals.Version, HNSVersion1803),
+		AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803),
+		AclPortRanges:         isFeatureSupported(globals.Version, HNSVersion1803),
+		AclRuleId:             isFeatureSupported(globals.Version, HNSVersion1803),
+	}
+
+	features.Api = ApiSupport{
+		V2: isFeatureSupported(globals.Version, V2ApiSupport),
+		V1: true, // HNSCall is still available.
+	}
+
+	features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion)
+	features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion)
+	features.DSR = isFeatureSupported(globals.Version, DSRVersion)
+	features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion)
+	features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version)
+	features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion)
+	features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion)
+	features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion)
+	features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion)
+	features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion)
+	features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion)
+	features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion)
+	features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion)
+	features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion)
+
+	logrus.WithFields(logrus.Fields{
+		"version":           fmt.Sprintf("%+v", globals.Version),
+		"supportedFeatures": fmt.Sprintf("%+v", features),
+	}).Info("HCN feature check")
+
+	return features, nil
+}
+
+func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool {
+	isFeatureSupported := false
+
+	for _, versionRange := range versionsSupported {
+		isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange)
+	}
+
+	return isFeatureSupported
+}
+
+func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool {
+	if currentVersion.Major < versionRange.MinVersion.Major {
+		return false
+	}
+	if currentVersion.Major > versionRange.MaxVersion.Major {
+		return false
+	}
+	if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor {
+		return false
+	}
+	if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > versionRange.MaxVersion.Minor {
+		return false
+	}
+	return true
+}
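
A sketch of the consumption pattern the caching above is designed for: query the feature set once and branch on individual flags; on pre-1803 hosts the error simply means nothing is supported.

package hcnexample

import "github.com/Microsoft/hcsshim/hcn"

// remoteSubnetSupported reports whether the host HNS supports RemoteSubnet
// policies. The underlying HCN query runs only once thanks to the sync.Once above.
func remoteSubnetSupported() (bool, error) {
	features, err := hcn.GetCachedSupportedFeatures()
	if err != nil {
		// Expected on pre-1803 builds; every feature flag is false in that case.
		return false, err
	}
	return features.RemoteSubnet, nil
}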

+ 795 - 0
vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go

@@ -0,0 +1,795 @@
+// Code generated mksyscall_windows.exe DO NOT EDIT
+
+package hcn
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return nil
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modiphlpapi       = windows.NewLazySystemDLL("iphlpapi.dll")
+	modvmcompute      = windows.NewLazySystemDLL("vmcompute.dll")
+	modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll")
+
+	procSetCurrentThreadCompartmentId  = modiphlpapi.NewProc("SetCurrentThreadCompartmentId")
+	procHNSCall                        = modvmcompute.NewProc("HNSCall")
+	procHcnEnumerateNetworks           = modcomputenetwork.NewProc("HcnEnumerateNetworks")
+	procHcnCreateNetwork               = modcomputenetwork.NewProc("HcnCreateNetwork")
+	procHcnOpenNetwork                 = modcomputenetwork.NewProc("HcnOpenNetwork")
+	procHcnModifyNetwork               = modcomputenetwork.NewProc("HcnModifyNetwork")
+	procHcnQueryNetworkProperties      = modcomputenetwork.NewProc("HcnQueryNetworkProperties")
+	procHcnDeleteNetwork               = modcomputenetwork.NewProc("HcnDeleteNetwork")
+	procHcnCloseNetwork                = modcomputenetwork.NewProc("HcnCloseNetwork")
+	procHcnEnumerateEndpoints          = modcomputenetwork.NewProc("HcnEnumerateEndpoints")
+	procHcnCreateEndpoint              = modcomputenetwork.NewProc("HcnCreateEndpoint")
+	procHcnOpenEndpoint                = modcomputenetwork.NewProc("HcnOpenEndpoint")
+	procHcnModifyEndpoint              = modcomputenetwork.NewProc("HcnModifyEndpoint")
+	procHcnQueryEndpointProperties     = modcomputenetwork.NewProc("HcnQueryEndpointProperties")
+	procHcnDeleteEndpoint              = modcomputenetwork.NewProc("HcnDeleteEndpoint")
+	procHcnCloseEndpoint               = modcomputenetwork.NewProc("HcnCloseEndpoint")
+	procHcnEnumerateNamespaces         = modcomputenetwork.NewProc("HcnEnumerateNamespaces")
+	procHcnCreateNamespace             = modcomputenetwork.NewProc("HcnCreateNamespace")
+	procHcnOpenNamespace               = modcomputenetwork.NewProc("HcnOpenNamespace")
+	procHcnModifyNamespace             = modcomputenetwork.NewProc("HcnModifyNamespace")
+	procHcnQueryNamespaceProperties    = modcomputenetwork.NewProc("HcnQueryNamespaceProperties")
+	procHcnDeleteNamespace             = modcomputenetwork.NewProc("HcnDeleteNamespace")
+	procHcnCloseNamespace              = modcomputenetwork.NewProc("HcnCloseNamespace")
+	procHcnEnumerateLoadBalancers      = modcomputenetwork.NewProc("HcnEnumerateLoadBalancers")
+	procHcnCreateLoadBalancer          = modcomputenetwork.NewProc("HcnCreateLoadBalancer")
+	procHcnOpenLoadBalancer            = modcomputenetwork.NewProc("HcnOpenLoadBalancer")
+	procHcnModifyLoadBalancer          = modcomputenetwork.NewProc("HcnModifyLoadBalancer")
+	procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties")
+	procHcnDeleteLoadBalancer          = modcomputenetwork.NewProc("HcnDeleteLoadBalancer")
+	procHcnCloseLoadBalancer           = modcomputenetwork.NewProc("HcnCloseLoadBalancer")
+	procHcnEnumerateSdnRoutes          = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes")
+	procHcnCreateSdnRoute              = modcomputenetwork.NewProc("HcnCreateSdnRoute")
+	procHcnOpenSdnRoute                = modcomputenetwork.NewProc("HcnOpenSdnRoute")
+	procHcnModifySdnRoute              = modcomputenetwork.NewProc("HcnModifySdnRoute")
+	procHcnQuerySdnRouteProperties     = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties")
+	procHcnDeleteSdnRoute              = modcomputenetwork.NewProc("HcnDeleteSdnRoute")
+	procHcnCloseSdnRoute               = modcomputenetwork.NewProc("HcnCloseSdnRoute")
+)
+
+func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
+	r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func _hnsCall(method string, path string, object string, response **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(method)
+	if hr != nil {
+		return
+	}
+	var _p1 *uint16
+	_p1, hr = syscall.UTF16PtrFromString(path)
+	if hr != nil {
+		return
+	}
+	var _p2 *uint16
+	_p2, hr = syscall.UTF16PtrFromString(object)
+	if hr != nil {
+		return
+	}
+	return __hnsCall(_p0, _p1, _p2, response)
+}
+
+func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) {
+	if hr = procHNSCall.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnEnumerateNetworks(_p0, networks, result)
+}
+
+func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) {
+	if hr = procHcnEnumerateNetworks.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnCreateNetwork(id, _p0, network, result)
+}
+
+func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) {
+	if hr = procHcnCreateNetwork.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) {
+	if hr = procHcnOpenNetwork.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnModifyNetwork(network, _p0, result)
+}
+
+func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) {
+	if hr = procHcnModifyNetwork.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnQueryNetworkProperties(network, _p0, properties, result)
+}
+
+func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) {
+	if hr = procHcnQueryNetworkProperties.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) {
+	if hr = procHcnDeleteNetwork.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCloseNetwork(network hcnNetwork) (hr error) {
+	if hr = procHcnCloseNetwork.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnEnumerateEndpoints(_p0, endpoints, result)
+}
+
+func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) {
+	if hr = procHcnEnumerateEndpoints.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnCreateEndpoint(network, id, _p0, endpoint, result)
+}
+
+func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) {
+	if hr = procHcnCreateEndpoint.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) {
+	if hr = procHcnOpenEndpoint.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnModifyEndpoint(endpoint, _p0, result)
+}
+
+func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) {
+	if hr = procHcnModifyEndpoint.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnQueryEndpointProperties(endpoint, _p0, properties, result)
+}
+
+func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) {
+	if hr = procHcnQueryEndpointProperties.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) {
+	if hr = procHcnDeleteEndpoint.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) {
+	if hr = procHcnCloseEndpoint.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnEnumerateNamespaces(_p0, namespaces, result)
+}
+
+func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) {
+	if hr = procHcnEnumerateNamespaces.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnCreateNamespace(id, _p0, namespace, result)
+}
+
+func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) {
+	if hr = procHcnCreateNamespace.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) {
+	if hr = procHcnOpenNamespace.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnModifyNamespace(namespace, _p0, result)
+}
+
+func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) {
+	if hr = procHcnModifyNamespace.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnQueryNamespaceProperties(namespace, _p0, properties, result)
+}
+
+func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) {
+	if hr = procHcnQueryNamespaceProperties.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) {
+	if hr = procHcnDeleteNamespace.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCloseNamespace(namespace hcnNamespace) (hr error) {
+	if hr = procHcnCloseNamespace.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result)
+}
+
+func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) {
+	if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result)
+}
+
+func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) {
+	if hr = procHcnCreateLoadBalancer.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) {
+	if hr = procHcnOpenLoadBalancer.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnModifyLoadBalancer(loadBalancer, _p0, result)
+}
+
+func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) {
+	if hr = procHcnModifyLoadBalancer.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result)
+}
+
+func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) {
+	if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) {
+	if hr = procHcnDeleteLoadBalancer.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) {
+	if hr = procHcnCloseLoadBalancer.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnEnumerateRoutes(_p0, routes, result)
+}
+
+func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) {
+	if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnCreateRoute(id, _p0, route, result)
+}
+
+func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) {
+	if hr = procHcnCreateSdnRoute.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) {
+	if hr = procHcnOpenSdnRoute.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(settings)
+	if hr != nil {
+		return
+	}
+	return _hcnModifyRoute(route, _p0, result)
+}
+
+func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) {
+	if hr = procHcnModifySdnRoute.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(query)
+	if hr != nil {
+		return
+	}
+	return _hcnQueryRouteProperties(route, _p0, properties, result)
+}
+
+func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) {
+	if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnDeleteRoute(id *_guid, result **uint16) (hr error) {
+	if hr = procHcnDeleteSdnRoute.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func hcnCloseRoute(route hcnRoute) (hr error) {
+	if hr = procHcnCloseSdnRoute.Find(); hr != nil {
+		return
+	}
+	r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
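
Every generated wrapper above repeats the same HRESULT handling. As a standalone sketch of that convention: a negative HRESULT signals an error, and when its facility is FACILITY_WIN32 (0x0007) the low 16 bits are the original Win32 error code, which maps directly onto syscall.Errno.

package hcnexample

import "syscall"

// hresultToError mirrors the error-mapping boilerplate emitted by
// mksyscall_windows for each HCN call above.
func hresultToError(r0 uintptr) error {
	if int32(r0) >= 0 {
		return nil // success HRESULTs (S_OK, S_FALSE, ...) are not errors
	}
	if r0&0x1fff0000 == 0x00070000 {
		r0 &= 0xffff // HRESULT_FROM_WIN32: recover the underlying Win32 error code
	}
	return syscall.Errno(r0)
}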

+ 110 - 0
vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go

@@ -0,0 +1,110 @@
+package cni
+
+import (
+	"errors"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/regstate"
+)
+
+const (
+	cniRoot = "cni"
+	cniKey  = "cfg"
+)
+
+// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM
+// map.
+type PersistedNamespaceConfig struct {
+	namespaceID string
+	stored      bool
+
+	ContainerID  string
+	HostUniqueID guid.GUID
+}
+
+// NewPersistedNamespaceConfig creates an in-memory namespace config that can be
+// persisted to the registry.
+func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig {
+	return &PersistedNamespaceConfig{
+		namespaceID:  namespaceID,
+		ContainerID:  containerID,
+		HostUniqueID: containerHostUniqueID,
+	}
+}
+
+// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches
+// `namespaceID`. If not found, it returns `regstate.NotFoundError`.
+func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) {
+	sk, err := regstate.Open(cniRoot, false)
+	if err != nil {
+		return nil, err
+	}
+	defer sk.Close()
+
+	pnc := PersistedNamespaceConfig{
+		namespaceID: namespaceID,
+		stored:      true,
+	}
+	if err := sk.Get(namespaceID, cniKey, &pnc); err != nil {
+		return nil, err
+	}
+	return &pnc, nil
+}
+
+// Store stores or updates the in-memory config to its registry state. If the
+// store fails, it returns the store error.
+func (pnc *PersistedNamespaceConfig) Store() error {
+	if pnc.namespaceID == "" {
+		return errors.New("invalid namespaceID ''")
+	}
+	if pnc.ContainerID == "" {
+		return errors.New("invalid containerID ''")
+	}
+	empty := guid.GUID{}
+	if pnc.HostUniqueID == empty {
+		return errors.New("invalid containerHostUniqueID 'empy'")
+	}
+	sk, err := regstate.Open(cniRoot, false)
+	if err != nil {
+		return err
+	}
+	defer sk.Close()
+
+	if pnc.stored {
+		if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil {
+			return err
+		}
+	} else {
+		if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil {
+			return err
+		}
+	}
+	pnc.stored = true
+	return nil
+}
+
+// Remove removes any persisted state associated with this config. If the config
+// is not found in the registry, `Remove` returns no error.
+func (pnc *PersistedNamespaceConfig) Remove() error {
+	if pnc.stored {
+		sk, err := regstate.Open(cniRoot, false)
+		if err != nil {
+			if regstate.IsNotFoundError(err) {
+				pnc.stored = false
+				return nil
+			}
+			return err
+		}
+		defer sk.Close()
+
+		if err := sk.Remove(pnc.namespaceID); err != nil {
+			if regstate.IsNotFoundError(err) {
+				pnc.stored = false
+				return nil
+			}
+			return err
+		}
+	}
+	pnc.stored = false
+	return nil
+}
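
A short lifecycle sketch for PersistedNamespaceConfig: store a namespace-to-UVM mapping, re-load it, and remove it. The IDs and GUID come from elsewhere, and since internal/cni is an internal package this only compiles from within the hcsshim module; the package name is illustrative.

package cniexample

import (
	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/cni"
)

// persistAndForget walks the Store / Load / Remove cycle for one namespace.
func persistAndForget(namespaceID, containerID string, hostID guid.GUID) error {
	pnc := cni.NewPersistedNamespaceConfig(namespaceID, containerID, hostID)
	if err := pnc.Store(); err != nil {
		return err
	}
	loaded, err := cni.LoadPersistedNamespaceConfig(namespaceID)
	if err != nil {
		return err
	}
	return loaded.Remove()
}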

+ 288 - 0
vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go

@@ -0,0 +1,288 @@
+package regstate
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"syscall"
+
+	"golang.org/x/sys/windows"
+	"golang.org/x/sys/windows/registry"
+)
+
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+
+const (
+	_REG_OPTION_VOLATILE = 1
+
+	_REG_OPENED_EXISTING_KEY = 2
+)
+
+type Key struct {
+	registry.Key
+	Name string
+}
+
+var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"}
+var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"}
+
+var rootPath = `SOFTWARE\Microsoft\runhcs`
+
+type NotFoundError struct {
+	ID string
+}
+
+func (err *NotFoundError) Error() string {
+	return fmt.Sprintf("ID '%s' was not found", err.ID)
+}
+
+func IsNotFoundError(err error) bool {
+	_, ok := err.(*NotFoundError)
+	return ok
+}
+
+type NoStateError struct {
+	ID  string
+	Key string
+}
+
+func (err *NoStateError) Error() string {
+	return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID)
+}
+
+func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) {
+	var (
+		h syscall.Handle
+		d uint32
+	)
+	fullpath := filepath.Join(k.Name, path)
+	pathPtr, _ := windows.UTF16PtrFromString(path)
+	err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d)
+	if err != nil {
+		return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err}
+	}
+	return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+func hive(perUser bool) *Key {
+	r := localMachine
+	if perUser {
+		r = localUser
+	}
+	return r
+}
+
+func Open(root string, perUser bool) (*Key, error) {
+	k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS)
+	if err != nil {
+		return nil, err
+	}
+	defer k.Close()
+
+	k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS)
+	if err != nil {
+		return nil, err
+	}
+	return k2, nil
+}
+
+func RemoveAll(root string, perUser bool) error {
+	k, err := hive(perUser).open(rootPath)
+	if err != nil {
+		return err
+	}
+	defer k.Close()
+	r, err := k.open(url.PathEscape(root))
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+	ids, err := r.Enumerate()
+	if err != nil {
+		return err
+	}
+	for _, id := range ids {
+		err = r.Remove(id)
+		if err != nil {
+			return err
+		}
+	}
+	r.Close()
+	return k.Remove(root)
+}
+
+func (k *Key) Close() error {
+	err := k.Key.Close()
+	k.Key = 0
+	return err
+}
+
+func (k *Key) Enumerate() ([]string, error) {
+	escapedIDs, err := k.ReadSubKeyNames(0)
+	if err != nil {
+		return nil, err
+	}
+	var ids []string
+	for _, e := range escapedIDs {
+		id, err := url.PathUnescape(e)
+		if err == nil {
+			ids = append(ids, id)
+		}
+	}
+	return ids, nil
+}
+
+func (k *Key) open(name string) (*Key, error) {
+	fullpath := filepath.Join(k.Name, name)
+	nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS)
+	if err != nil {
+		return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err}
+	}
+	return &Key{nk, fullpath}, nil
+}
+
+func (k *Key) openid(id string) (*Key, error) {
+	escaped := url.PathEscape(id)
+	fullpath := filepath.Join(k.Name, escaped)
+	nk, err := k.open(escaped)
+	if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
+		return nil, &NotFoundError{id}
+	}
+	if err != nil {
+		return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err}
+	}
+	return nk, nil
+}
+
+func (k *Key) Remove(id string) error {
+	escaped := url.PathEscape(id)
+	err := registry.DeleteKey(k.Key, escaped)
+	if err != nil {
+		if err == syscall.ERROR_FILE_NOT_FOUND {
+			return &NotFoundError{id}
+		}
+		return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err}
+	}
+	return nil
+}
+
+func (k *Key) set(id string, create bool, key string, state interface{}) error {
+	var sk *Key
+	var err error
+	if create {
+		var existing bool
+		eid := url.PathEscape(id)
+		sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS)
+		if err != nil {
+			return err
+		}
+		defer sk.Close()
+		if existing {
+			sk.Close()
+			return fmt.Errorf("container %s already exists", id)
+		}
+	} else {
+		sk, err = k.openid(id)
+		if err != nil {
+			return err
+		}
+		defer sk.Close()
+	}
+	switch reflect.TypeOf(state).Kind() {
+	case reflect.Bool:
+		v := uint32(0)
+		if state.(bool) {
+			v = 1
+		}
+		err = sk.SetDWordValue(key, v)
+	case reflect.Int:
+		err = sk.SetQWordValue(key, uint64(state.(int)))
+	case reflect.String:
+		err = sk.SetStringValue(key, state.(string))
+	default:
+		var js []byte
+		js, err = json.Marshal(state)
+		if err != nil {
+			return err
+		}
+		err = sk.SetBinaryValue(key, js)
+	}
+	if err != nil {
+		if err == syscall.ERROR_FILE_NOT_FOUND {
+			return &NoStateError{id, key}
+		}
+		return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err}
+	}
+	return nil
+}
+
+func (k *Key) Create(id, key string, state interface{}) error {
+	return k.set(id, true, key, state)
+}
+
+func (k *Key) Set(id, key string, state interface{}) error {
+	return k.set(id, false, key, state)
+}
+
+func (k *Key) Clear(id, key string) error {
+	sk, err := k.openid(id)
+	if err != nil {
+		return err
+	}
+	defer sk.Close()
+	err = sk.DeleteValue(key)
+	if err != nil {
+		if err == syscall.ERROR_FILE_NOT_FOUND {
+			return &NoStateError{id, key}
+		}
+		return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err}
+	}
+	return nil
+}
+
+func (k *Key) Get(id, key string, state interface{}) error {
+	sk, err := k.openid(id)
+	if err != nil {
+		return err
+	}
+	defer sk.Close()
+
+	var js []byte
+	switch reflect.TypeOf(state).Elem().Kind() {
+	case reflect.Bool:
+		var v uint64
+		v, _, err = sk.GetIntegerValue(key)
+		if err == nil {
+			*state.(*bool) = v != 0
+		}
+	case reflect.Int:
+		var v uint64
+		v, _, err = sk.GetIntegerValue(key)
+		if err == nil {
+			*state.(*int) = int(v)
+		}
+	case reflect.String:
+		var v string
+		v, _, err = sk.GetStringValue(key)
+		if err == nil {
+			*state.(*string) = string(v)
+		}
+	default:
+		js, _, err = sk.GetBinaryValue(key)
+	}
+	if err != nil {
+		if err == syscall.ERROR_FILE_NOT_FOUND {
+			return &NoStateError{id, key}
+		}
+		return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err}
+	}
+	if js != nil {
+		err = json.Unmarshal(js, state)
+	}
+	return err
+}
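
A minimal sketch of how the regstate key/value API above fits together. Note that regstate is an internal hcsshim package, so this only illustrates the call shape as used inside that module; the root name "example", the ID, and the settings struct are hypothetical, and the code is Windows-only.

```go
// Sketch only; the names below are illustrative, not part of the package.
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim/internal/regstate"
)

type settings struct {
	Bundle string
	Pid    int
}

func main() {
	// Open creates (or opens) the volatile key
	// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\runhcs\example (perUser=false).
	k, err := regstate.Open("example", false)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// Non-scalar values are stored as JSON via SetBinaryValue;
	// bool/int/string values use native registry value types.
	if err := k.Create("container-1", "state", &settings{Bundle: `C:\bundles\c1`, Pid: 1234}); err != nil {
		log.Fatal(err)
	}

	var s settings
	if err := k.Get("container-1", "state", &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Bundle, s.Pid)

	// Remove deletes the per-ID subkey; a missing key surfaces as *NotFoundError.
	if err := k.Remove("container-1"); err != nil && !regstate.IsNotFoundError(err) {
		log.Fatal(err)
	}
}
```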

+ 51 - 0
vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go

@@ -0,0 +1,51 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package regstate
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return nil
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+
+	procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
+)
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}

+ 71 - 0
vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go

@@ -0,0 +1,71 @@
+package runhcs
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+)
+
+// ContainerState represents the platform agnostic pieces relating to a
+// running container's status and state
+type ContainerState struct {
+	// Version is the OCI version for the container
+	Version string `json:"ociVersion"`
+	// ID is the container ID
+	ID string `json:"id"`
+	// InitProcessPid is the init process id in the parent namespace
+	InitProcessPid int `json:"pid"`
+	// Status is the current status of the container, running, paused, ...
+	Status string `json:"status"`
+	// Bundle is the path on the filesystem to the bundle
+	Bundle string `json:"bundle"`
+	// Rootfs is a path to a directory containing the container's root filesystem.
+	Rootfs string `json:"rootfs"`
+	// Created is the unix timestamp for the creation time of the container in UTC
+	Created time.Time `json:"created"`
+	// Annotations is the user defined annotations added to the config.
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// The owner of the state directory (the owner of the container).
+	Owner string `json:"owner"`
+}
+
+// GetErrorFromPipe reads from `pipe` and verifies whether the operation
+// returned success or an error. On error, the message is converted to an error
+// and returned. If `p` is not nil, a `Kill` and `Wait` for exit are issued.
+func GetErrorFromPipe(pipe io.Reader, p *os.Process) error {
+	serr, err := ioutil.ReadAll(pipe)
+	if err != nil {
+		return err
+	}
+
+	if bytes.Equal(serr, ShimSuccess) {
+		return nil
+	}
+
+	extra := ""
+	if p != nil {
+		_ = p.Kill()
+		state, err := p.Wait()
+		if err != nil {
+			panic(err)
+		}
+		extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode)
+	}
+	if len(serr) == 0 {
+		return fmt.Errorf("unknown shim failure%s", extra)
+	}
+
+	return errors.New(string(serr))
+}
+
+// VMPipePath returns the named pipe path for the vm shim.
+func VMPipePath(hostUniqueID guid.GUID) string {
+	return SafePipePath("runhcs-vm-" + hostUniqueID.String())
+}

+ 16 - 0
vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go

@@ -0,0 +1,16 @@
+package runhcs
+
+import "net/url"
+
+const (
+	SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\`
+)
+
+// ShimSuccess is the byte stream returned on a successful operation.
+var ShimSuccess = []byte{0, 'O', 'K', 0}
+
+func SafePipePath(name string) string {
+	// Use a pipe in the Administrators protected prefix to prevent malicious
+	// squatting.
+	return SafePipePrefix + url.PathEscape(name)
+}

+ 43 - 0
vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go

@@ -0,0 +1,43 @@
+package runhcs
+
+import (
+	"encoding/json"
+
+	"github.com/Microsoft/go-winio"
+)
+
+// VMRequestOp is an operation that can be issued to a VM shim.
+type VMRequestOp string
+
+const (
+	// OpCreateContainer is a create container request.
+	OpCreateContainer VMRequestOp = "create"
+	// OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM.
+	OpSyncNamespace VMRequestOp = "sync"
+	// OpUnmountContainer is a container unmount request.
+	OpUnmountContainer VMRequestOp = "unmount"
+	// OpUnmountContainerDiskOnly is a container unmount disk request.
+	OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk"
+)
+
+// VMRequest is an operation request that is issued to a VM shim.
+type VMRequest struct {
+	ID string
+	Op VMRequestOp
+}
+
+// IssueVMRequest issues a request to a shim at the given pipe.
+func IssueVMRequest(pipepath string, req *VMRequest) error {
+	pipe, err := winio.DialPipe(pipepath, nil)
+	if err != nil {
+		return err
+	}
+	defer pipe.Close()
+	if err := json.NewEncoder(pipe).Encode(req); err != nil {
+		return err
+	}
+	if err := GetErrorFromPipe(pipe, nil); err != nil {
+		return err
+	}
+	return nil
+}
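
A minimal sketch of driving a running VM shim with the helpers above: compute the pipe path from the host's unique ID and issue an OpCreateContainer request. The GUID and container ID are placeholders, and runhcs is an internal hcsshim package, so this only shows the call shape used within that module (Windows-only).

```go
// Sketch only; the GUID and container ID below are placeholders.
package main

import (
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/runhcs"
)

func main() {
	// The VM shim listens on \\.\pipe\ProtectedPrefix\Administrators\runhcs-vm-<id>.
	hostID, err := guid.FromString("00000000-0000-0000-0000-000000000001")
	if err != nil {
		log.Fatal(err)
	}
	pipe := runhcs.VMPipePath(hostID)

	// IssueVMRequest dials the pipe, JSON-encodes the request, and checks
	// the shim's reply via GetErrorFromPipe.
	req := &runhcs.VMRequest{ID: "container-1", Op: runhcs.OpCreateContainer}
	if err := runhcs.IssueVMRequest(pipe, req); err != nil {
		log.Fatalf("vm request failed: %v", err)
	}
}
```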

+ 3 - 0
vendor/github.com/containerd/go-cni/.gitignore

@@ -0,0 +1,3 @@
+/bin/
+coverage.txt
+profile.out

+ 23 - 0
vendor/github.com/containerd/go-cni/.golangci.yml

@@ -0,0 +1,23 @@
+linters:
+  enable:
+    - structcheck
+    - varcheck
+    - staticcheck
+    - unconvert
+    - gofmt
+    - goimports
+    - revive
+    - ineffassign
+    - vet
+    - unused
+    - misspell
+  disable:
+    - errcheck
+
+# FIXME: re-enable after fixing GoDoc in this repository
+#issues:
+#  include:
+#    - EXC0002
+
+run:
+  timeout: 2m

+ 201 - 0
vendor/github.com/containerd/go-cni/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 41 - 0
vendor/github.com/containerd/go-cni/Makefile

@@ -0,0 +1,41 @@
+#   Copyright The containerd Authors.
+
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+
+#       http://www.apache.org/licenses/LICENSE-2.0
+
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+TESTFLAGS_PARALLEL ?= 8
+
+EXTRA_TESTFLAGS ?=
+
+# quiet or not
+ifeq ($(V),1)
+	Q =
+else
+	Q = @
+endif
+
+.PHONY: test integration clean help
+
+help: ## this help
+	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
+
+test: ## run tests, except integration tests and tests that require root
+	$(Q)go test -v -race $(EXTRA_TESTFLAGS) -count=1 ./...
+
+integration: bin/integration.test ## run integration test
+	$(Q)bin/integration.test -test.v -test.count=1 -test.root $(EXTRA_TESTFLAGS) -test.parallel $(TESTFLAGS_PARALLEL)
+
+bin/integration.test: ## build integration test binary into bin
+	$(Q)cd ./integration && go test -race -c . -o ../bin/integration.test
+
+clean: ## clean up binaries
+	$(Q)rm -rf bin/

+ 96 - 0
vendor/github.com/containerd/go-cni/README.md

@@ -0,0 +1,96 @@
+# go-cni
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/go-cni)](https://pkg.go.dev/github.com/containerd/go-cni)
+[![Build Status](https://github.com/containerd/go-cni/workflows/CI/badge.svg)](https://github.com/containerd/go-cni/actions?query=workflow%3ACI)
+[![codecov](https://codecov.io/gh/containerd/go-cni/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/go-cni)
+[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/go-cni)](https://goreportcard.com/report/github.com/containerd/go-cni)
+
+A generic CNI library to provide APIs for CNI plugin interactions. The library provides APIs to:
+
+- Load CNI network config from different sources  
+- Setup networks for container namespace
+- Remove networks from container namespace
+- Query status of CNI network plugin initialization
+- Check that the network is still in the desired state
+
+go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni)
+
+## Usage
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	gocni "github.com/containerd/go-cni"
+)
+
+func main() {
+	id := "example"
+	netns := "/var/run/netns/example-ns-1"
+
+	// CNI allows multiple CNI configurations and the network interface
+	// will be named by eth0, eth1, ..., ethN.
+	ifPrefixName := "eth"
+	defaultIfName := "eth0"
+
+	// Initializes library
+	l, err := gocni.New(
+		// one for loopback network interface
+		gocni.WithMinNetworkCount(2),
+		gocni.WithPluginConfDir("/etc/cni/net.d"),
+		gocni.WithPluginDir([]string{"/opt/cni/bin"}),
+		// Sets the prefix for network interfaces, eth by default
+		gocni.WithInterfacePrefix(ifPrefixName))
+	if err != nil {
+		log.Fatalf("failed to initialize cni library: %v", err)
+	}
+
+	// Load the cni configuration
+	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
+		log.Fatalf("failed to load cni configuration: %v", err)
+	}
+
+	// Setup network for namespace.
+	labels := map[string]string{
+		"K8S_POD_NAMESPACE":          "namespace1",
+		"K8S_POD_NAME":               "pod1",
+		"K8S_POD_INFRA_CONTAINER_ID": id,
+		// Plugin tolerates all Args embedded by unknown labels, like
+		// K8S_POD_NAMESPACE/NAME/INFRA_CONTAINER_ID...
+		"IgnoreUnknown": "1",
+	}
+
+	ctx := context.Background()
+
+	// Teardown network
+	defer func() {
+		if err := l.Remove(ctx, id, netns, gocni.WithLabels(labels)); err != nil {
+			log.Fatalf("failed to teardown network: %v", err)
+		}
+	}()
+
+	// Setup network
+	result, err := l.Setup(ctx, id, netns, gocni.WithLabels(labels))
+	if err != nil {
+		log.Fatalf("failed to setup network for namespace: %v", err)
+	}
+
+	// Get IP of the default interface
+	IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String()
+	fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP)
+}
+```
+
+## Project details
+
+The go-cni is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.

+ 312 - 0
vendor/github.com/containerd/go-cni/cni.go

@@ -0,0 +1,312 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+
+	cnilibrary "github.com/containernetworking/cni/libcni"
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/types"
+	types100 "github.com/containernetworking/cni/pkg/types/100"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+type CNI interface {
+	// Setup sets up the network for the namespace
+	Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
+	// SetupSerially sets up each of the network interfaces for the namespace in serial
+	SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
+	// Remove tears down the network of the namespace.
+	Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error
+	// Check checks if the network is still in desired state
+	Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error
+	// Load loads the cni network config
+	Load(opts ...Opt) error
+	// Status checks the status of the cni initialization
+	Status() error
+	// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI
+	GetConfig() *ConfigResult
+}
+
+type ConfigResult struct {
+	PluginDirs       []string
+	PluginConfDir    string
+	PluginMaxConfNum int
+	Prefix           string
+	Networks         []*ConfNetwork
+}
+
+type ConfNetwork struct {
+	Config *NetworkConfList
+	IFName string
+}
+
+// NetworkConfList is a version of cnilibrary.NetworkConfigList with the source bytes kept as a string
+type NetworkConfList struct {
+	Name       string
+	CNIVersion string
+	Plugins    []*NetworkConf
+	Source     string
+}
+
+// NetworkConf is a version of cnilibrary.NetworkConfig with the source bytes kept as a string
+type NetworkConf struct {
+	Network *types.NetConf
+	Source  string
+}
+
+type libcni struct {
+	config
+
+	cniConfig    cnilibrary.CNI
+	networkCount int // minimum network plugin configurations needed to initialize cni
+	networks     []*Network
+	sync.RWMutex
+}
+
+func defaultCNIConfig() *libcni {
+	return &libcni{
+		config: config{
+			pluginDirs:       []string{DefaultCNIDir},
+			pluginConfDir:    DefaultNetDir,
+			pluginMaxConfNum: DefaultMaxConfNum,
+			prefix:           DefaultPrefix,
+		},
+		cniConfig: cnilibrary.NewCNIConfig(
+			[]string{
+				DefaultCNIDir,
+			},
+			&invoke.DefaultExec{
+				RawExec:       &invoke.RawExec{Stderr: os.Stderr},
+				PluginDecoder: version.PluginDecoder{},
+			},
+		),
+		networkCount: 1,
+	}
+}
+
+// New creates a new libcni instance.
+func New(config ...Opt) (CNI, error) {
+	cni := defaultCNIConfig()
+	var err error
+	for _, c := range config {
+		if err = c(cni); err != nil {
+			return nil, err
+		}
+	}
+	return cni, nil
+}
+
+// Load loads the latest config from cni config files.
+func (c *libcni) Load(opts ...Opt) error {
+	var err error
+	c.Lock()
+	defer c.Unlock()
+	// Reset the networks on a load operation to ensure
+	// config happens on a clean slate
+	c.reset()
+
+	for _, o := range opts {
+		if err = o(c); err != nil {
+			return fmt.Errorf("cni config load failed: %v: %w", err, ErrLoad)
+		}
+	}
+	return nil
+}
+
+// Status returns the status of CNI initialization.
+func (c *libcni) Status() error {
+	c.RLock()
+	defer c.RUnlock()
+	if len(c.networks) < c.networkCount {
+		return ErrCNINotInitialized
+	}
+	return nil
+}
+
+// Networks returns all the configured networks.
+// NOTE: Caller MUST NOT modify anything in the returned array.
+func (c *libcni) Networks() []*Network {
+	c.RLock()
+	defer c.RUnlock()
+	return append([]*Network{}, c.networks...)
+}
+
+// Setup sets up the network in the namespace and returns a Result
+func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
+	if err := c.Status(); err != nil {
+		return nil, err
+	}
+	ns, err := newNamespace(id, path, opts...)
+	if err != nil {
+		return nil, err
+	}
+	result, err := c.attachNetworks(ctx, ns)
+	if err != nil {
+		return nil, err
+	}
+	return c.createResult(result)
+}
+
+// SetupSerially sets up the network in the namespace serially and returns a Result
+func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
+	if err := c.Status(); err != nil {
+		return nil, err
+	}
+	ns, err := newNamespace(id, path, opts...)
+	if err != nil {
+		return nil, err
+	}
+	result, err := c.attachNetworksSerially(ctx, ns)
+	if err != nil {
+		return nil, err
+	}
+	return c.createResult(result)
+}
+
+func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) {
+	var results []*types100.Result
+	for _, network := range c.Networks() {
+		r, err := network.Attach(ctx, ns)
+		if err != nil {
+			return nil, err
+		}
+		results = append(results, r)
+	}
+	return results, nil
+}
+
+type asynchAttachResult struct {
+	index int
+	res   *types100.Result
+	err   error
+}
+
+func asynchAttach(ctx context.Context, index int, n *Network, ns *Namespace, wg *sync.WaitGroup, rc chan asynchAttachResult) {
+	defer wg.Done()
+	r, err := n.Attach(ctx, ns)
+	rc <- asynchAttachResult{index: index, res: r, err: err}
+}
+
+func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*types100.Result, error) {
+	var wg sync.WaitGroup
+	var firstError error
+	results := make([]*types100.Result, len(c.Networks()))
+	rc := make(chan asynchAttachResult)
+
+	for i, network := range c.Networks() {
+		wg.Add(1)
+		go asynchAttach(ctx, i, network, ns, &wg, rc)
+	}
+
+	for range c.Networks() {
+		rs := <-rc
+		if rs.err != nil && firstError == nil {
+			firstError = rs.err
+		}
+		results[rs.index] = rs.res
+	}
+	wg.Wait()
+
+	return results, firstError
+}
+
+// Remove removes the network config from the namespace
+func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error {
+	if err := c.Status(); err != nil {
+		return err
+	}
+	ns, err := newNamespace(id, path, opts...)
+	if err != nil {
+		return err
+	}
+	for _, network := range c.Networks() {
+		if err := network.Remove(ctx, ns); err != nil {
+			// Based on CNI spec v0.7.0, empty network namespace is allowed to
+			// do best effort cleanup. However, it is not handled consistently
+			// right now:
+			// https://github.com/containernetworking/plugins/issues/210
+			// TODO(random-liu): Remove the error handling when the issue is
+			// fixed and the CNI spec v0.6.0 support is deprecated.
+			// NOTE(claudiub): Some CNIs could return a "not found" error, which could mean that
+			// it was already deleted.
+			if (path == "" && strings.Contains(err.Error(), "no such file or directory")) || strings.Contains(err.Error(), "not found") {
+				continue
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// Check checks if the network is still in desired state
+func (c *libcni) Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error {
+	if err := c.Status(); err != nil {
+		return err
+	}
+	ns, err := newNamespace(id, path, opts...)
+	if err != nil {
+		return err
+	}
+	for _, network := range c.Networks() {
+		err := network.Check(ctx, ns)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI
+func (c *libcni) GetConfig() *ConfigResult {
+	c.RLock()
+	defer c.RUnlock()
+	r := &ConfigResult{
+		PluginDirs:       c.config.pluginDirs,
+		PluginConfDir:    c.config.pluginConfDir,
+		PluginMaxConfNum: c.config.pluginMaxConfNum,
+		Prefix:           c.config.prefix,
+	}
+	for _, network := range c.networks {
+		conf := &NetworkConfList{
+			Name:       network.config.Name,
+			CNIVersion: network.config.CNIVersion,
+			Source:     string(network.config.Bytes),
+		}
+		for _, plugin := range network.config.Plugins {
+			conf.Plugins = append(conf.Plugins, &NetworkConf{
+				Network: plugin.Network,
+				Source:  string(plugin.Bytes),
+			})
+		}
+		r.Networks = append(r.Networks, &ConfNetwork{
+			Config: conf,
+			IFName: network.ifName,
+		})
+	}
+	return r
+}
+
+func (c *libcni) reset() {
+	c.networks = nil
+}
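
A minimal sketch of the load/status/inspect flow exposed by this interface. Directory paths are the package defaults; which networks get loaded depends on the host.

```go
// Sketch only; assumes CNI config exists under /etc/cni/net.d.
package main

import (
	"fmt"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	l, err := gocni.New(
		gocni.WithPluginConfDir("/etc/cni/net.d"),
		gocni.WithPluginDir([]string{"/opt/cni/bin"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Load resets any previously loaded networks and applies the options.
	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
		log.Fatal(err)
	}

	// Status reports ErrCNINotInitialized until at least networkCount networks are loaded.
	if err := l.Status(); err != nil {
		log.Fatal(err)
	}

	// GetConfig exposes the parsed configuration, including each network's raw source.
	for _, n := range l.GetConfig().Networks {
		fmt.Printf("network %q on %s (%d plugins)\n", n.Config.Name, n.IFName, len(n.Config.Plugins))
	}
}
```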

+ 34 - 0
vendor/github.com/containerd/go-cni/deprecated.go

@@ -0,0 +1,34 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import types100 "github.com/containernetworking/cni/pkg/types/100"
+
+// Deprecated: use cni.Opt instead
+type CNIOpt = Opt //revive:disable // type name will be used as cni.CNIOpt by other packages, and that stutters
+
+// Deprecated: use cni.Result instead
+type CNIResult = Result //revive:disable // type name will be used as cni.CNIResult by other packages, and that stutters
+
+// GetCNIResultFromResults creates a Result from the given slice of types100.Result,
+// adding structured data containing the interface configuration for each of the
+// interfaces created in the namespace. It returns an error if validation of
+// results fails, or if a network could not be found.
+// Deprecated: do not use
+func (c *libcni) GetCNIResultFromResults(results []*types100.Result) (*Result, error) {
+	return c.createResult(results)
+}

+ 55 - 0
vendor/github.com/containerd/go-cni/errors.go

@@ -0,0 +1,55 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"errors"
+)
+
+var (
+	ErrCNINotInitialized = errors.New("cni plugin not initialized")
+	ErrInvalidConfig     = errors.New("invalid cni config")
+	ErrNotFound          = errors.New("not found")
+	ErrRead              = errors.New("failed to read config file")
+	ErrInvalidResult     = errors.New("invalid result")
+	ErrLoad              = errors.New("failed to load cni config")
+)
+
+// IsCNINotInitialized returns true if the error is due to cni config not being initialized
+func IsCNINotInitialized(err error) bool {
+	return errors.Is(err, ErrCNINotInitialized)
+}
+
+// IsInvalidConfig returns true if the error is invalid cni config
+func IsInvalidConfig(err error) bool {
+	return errors.Is(err, ErrInvalidConfig)
+}
+
+// IsNotFound returns true if the error is due to a missing config or result
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsReadFailure returns true if the error is a config read failure
+func IsReadFailure(err error) bool {
+	return errors.Is(err, ErrRead)
+}
+
+// IsInvalidResult returns true if the error is due to an invalid cni result
+func IsInvalidResult(err error) bool {
+	return errors.Is(err, ErrInvalidResult)
+}
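
Errors returned by the package wrap these sentinels (via %w), so callers can branch on them with the helpers above or with errors.Is. A small hypothetical sketch:

```go
// Sketch only; waitForCNI is a hypothetical helper, not part of go-cni.
package main

import (
	"log"
	"time"

	gocni "github.com/containerd/go-cni"
)

// waitForCNI polls Status, treating ErrCNINotInitialized as transient
// and any other error as fatal.
func waitForCNI(l gocni.CNI) error {
	for i := 0; i < 10; i++ {
		err := l.Status()
		if err == nil {
			return nil
		}
		if !gocni.IsCNINotInitialized(err) { // same as !errors.Is(err, gocni.ErrCNINotInitialized)
			return err
		}
		time.Sleep(time.Second)
	}
	return gocni.ErrCNINotInitialized
}

func main() {
	l, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := waitForCNI(l); err != nil {
		log.Fatal(err)
	}
}
```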

+ 41 - 0
vendor/github.com/containerd/go-cni/helper.go

@@ -0,0 +1,41 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"fmt"
+
+	types100 "github.com/containernetworking/cni/pkg/types/100"
+)
+
+func validateInterfaceConfig(ipConf *types100.IPConfig, ifs int) error {
+	if ipConf == nil {
+		return fmt.Errorf("invalid IP configuration (nil)")
+	}
+	if ipConf.Interface != nil && *ipConf.Interface > ifs {
+		return fmt.Errorf("invalid IP configuration (interface number %d is > number of interfaces %d)", *ipConf.Interface, ifs)
+	}
+	return nil
+}
+
+func getIfName(prefix string, i int) string {
+	return fmt.Sprintf("%s%d", prefix, i)
+}
+
+func defaultInterface(prefix string) string {
+	return getIfName(prefix, 0)
+}

+ 81 - 0
vendor/github.com/containerd/go-cni/namespace.go

@@ -0,0 +1,81 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"context"
+
+	cnilibrary "github.com/containernetworking/cni/libcni"
+	types100 "github.com/containernetworking/cni/pkg/types/100"
+)
+
+type Network struct {
+	cni    cnilibrary.CNI
+	config *cnilibrary.NetworkConfigList
+	ifName string
+}
+
+func (n *Network) Attach(ctx context.Context, ns *Namespace) (*types100.Result, error) {
+	r, err := n.cni.AddNetworkList(ctx, n.config, ns.config(n.ifName))
+	if err != nil {
+		return nil, err
+	}
+	return types100.NewResultFromResult(r)
+}
+
+func (n *Network) Remove(ctx context.Context, ns *Namespace) error {
+	return n.cni.DelNetworkList(ctx, n.config, ns.config(n.ifName))
+}
+
+func (n *Network) Check(ctx context.Context, ns *Namespace) error {
+	return n.cni.CheckNetworkList(ctx, n.config, ns.config(n.ifName))
+}
+
+type Namespace struct {
+	id             string
+	path           string
+	capabilityArgs map[string]interface{}
+	args           map[string]string
+}
+
+func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) {
+	ns := &Namespace{
+		id:             id,
+		path:           path,
+		capabilityArgs: make(map[string]interface{}),
+		args:           make(map[string]string),
+	}
+	for _, o := range opts {
+		if err := o(ns); err != nil {
+			return nil, err
+		}
+	}
+	return ns, nil
+}
+
+func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf {
+	c := &cnilibrary.RuntimeConf{
+		ContainerID: ns.id,
+		NetNS:       ns.path,
+		IfName:      ifName,
+	}
+	for k, v := range ns.args {
+		c.Args = append(c.Args, [2]string{k, v})
+	}
+	c.CapabilityArgs = ns.capabilityArgs
+	return c
+}

+ 77 - 0
vendor/github.com/containerd/go-cni/namespace_opts.go

@@ -0,0 +1,77 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+type NamespaceOpts func(s *Namespace) error
+
+// WithCapabilityPortMap adds support for port mappings
+func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["portMappings"] = portMapping
+		return nil
+	}
+}
+
+// WithCapabilityIPRanges adds support for ip ranges
+func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["ipRanges"] = ipRanges
+		return nil
+	}
+}
+
+// WithCapabilityBandWidth adds support for bandwidth limits
+func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["bandwidth"] = bandWidth
+		return nil
+	}
+}
+
+// WithCapabilityDNS adds support for dns
+func WithCapabilityDNS(dns DNS) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["dns"] = dns
+		return nil
+	}
+}
+
+// WithCapability supports well-known capabilities
+// https://www.cni.dev/docs/conventions/#well-known-capabilities
+func WithCapability(name string, capability interface{}) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs[name] = capability
+		return nil
+	}
+}
+
+// WithLabels adds the given labels to the CNI args
+func WithLabels(labels map[string]string) NamespaceOpts {
+	return func(c *Namespace) error {
+		for k, v := range labels {
+			c.args[k] = v
+		}
+		return nil
+	}
+}
+
+func WithArgs(k, v string) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.args[k] = v
+		return nil
+	}
+}
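
A minimal sketch of composing these per-namespace options when setting up a network; the container ID, netns path, and port values are placeholders.

```go
// Sketch only; IDs, paths, and ports below are placeholders.
package main

import (
	"context"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	l, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := l.Load(gocni.WithDefaultConf); err != nil {
		log.Fatal(err)
	}

	// Expose container port 80 on host port 8080 and pass CNI args to the plugins.
	opts := []gocni.NamespaceOpts{
		gocni.WithCapabilityPortMap([]gocni.PortMapping{
			{HostPort: 8080, ContainerPort: 80, Protocol: "tcp", HostIP: "0.0.0.0"},
		}),
		gocni.WithLabels(map[string]string{"IgnoreUnknown": "1"}),
		gocni.WithArgs("K8S_POD_NAME", "pod1"),
	}

	if _, err := l.Setup(context.Background(), "container-1", "/var/run/netns/example", opts...); err != nil {
		log.Fatal(err)
	}
}
```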

+ 273 - 0
vendor/github.com/containerd/go-cni/opts.go

@@ -0,0 +1,273 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+
+	cnilibrary "github.com/containernetworking/cni/libcni"
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+// Opt sets options for a CNI instance
+type Opt func(c *libcni) error
+
+// WithInterfacePrefix sets the prefix for network interfaces
+// e.g. eth or wlan
+func WithInterfacePrefix(prefix string) Opt {
+	return func(c *libcni) error {
+		c.prefix = prefix
+		return nil
+	}
+}
+
+// WithPluginDir can be used to set the locations of
+// the cni plugin binaries
+func WithPluginDir(dirs []string) Opt {
+	return func(c *libcni) error {
+		c.pluginDirs = dirs
+		c.cniConfig = cnilibrary.NewCNIConfig(
+			dirs,
+			&invoke.DefaultExec{
+				RawExec:       &invoke.RawExec{Stderr: os.Stderr},
+				PluginDecoder: version.PluginDecoder{},
+			},
+		)
+		return nil
+	}
+}
+
+// WithPluginConfDir can be used to configure the
+// cni configuration directory.
+func WithPluginConfDir(dir string) Opt {
+	return func(c *libcni) error {
+		c.pluginConfDir = dir
+		return nil
+	}
+}
+
+// WithPluginMaxConfNum can be used to configure the
+// max cni plugin config file num.
+func WithPluginMaxConfNum(max int) Opt {
+	return func(c *libcni) error {
+		c.pluginMaxConfNum = max
+		return nil
+	}
+}
+
+// WithMinNetworkCount can be used to configure the
+// minimum number of networks that must be configured and initialized
+// for the status to report success. By default it is 1.
+func WithMinNetworkCount(count int) Opt {
+	return func(c *libcni) error {
+		c.networkCount = count
+		return nil
+	}
+}
+
+// WithLoNetwork can be used to load the loopback
+// network config.
+func WithLoNetwork(c *libcni) error {
+	loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{
+"cniVersion": "0.3.1",
+"name": "cni-loopback",
+"plugins": [{
+  "type": "loopback"
+}]
+}`))
+
+	c.networks = append(c.networks, &Network{
+		cni:    c.cniConfig,
+		config: loConfig,
+		ifName: "lo",
+	})
+	return nil
+}
+
+// WithConf can be used to load config directly
+// from byte.
+func WithConf(bytes []byte) Opt {
+	return WithConfIndex(bytes, 0)
+}
+
+// WithConfIndex can be used to load config directly
+// from byte and set the interface name's index.
+func WithConfIndex(bytes []byte, index int) Opt {
+	return func(c *libcni) error {
+		conf, err := cnilibrary.ConfFromBytes(bytes)
+		if err != nil {
+			return err
+		}
+		confList, err := cnilibrary.ConfListFromConf(conf)
+		if err != nil {
+			return err
+		}
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, index),
+		})
+		return nil
+	}
+}
+
+// WithConfFile can be used to load network config
+// from a .conf file. Only an absolute fileName
+// (with path) is supported.
+func WithConfFile(fileName string) Opt {
+	return func(c *libcni) error {
+		conf, err := cnilibrary.ConfFromFile(fileName)
+		if err != nil {
+			return err
+		}
+		// upconvert to conf list
+		confList, err := cnilibrary.ConfListFromConf(conf)
+		if err != nil {
+			return err
+		}
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, 0),
+		})
+		return nil
+	}
+}
+
+// WithConfListBytes can be used to load network config list directly
+// from byte
+func WithConfListBytes(bytes []byte) Opt {
+	return func(c *libcni) error {
+		confList, err := cnilibrary.ConfListFromBytes(bytes)
+		if err != nil {
+			return err
+		}
+		i := len(c.networks)
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		return nil
+	}
+}
+
+// WithConfListFile can be used to load network config
+// from a .conflist file. Only an absolute fileName
+// (with path) is supported.
+func WithConfListFile(fileName string) Opt {
+	return func(c *libcni) error {
+		confList, err := cnilibrary.ConfListFromFile(fileName)
+		if err != nil {
+			return err
+		}
+		i := len(c.networks)
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		return nil
+	}
+}
+
+// WithDefaultConf can be used to detect the default network
+// config file from the configured cni config directory and load
+// it.
+// Since the CNI spec does not specify a way to detect default networks,
+// the convention chosen is - the first network configuration in the sorted
+// list of network conf files as the default network.
+func WithDefaultConf(c *libcni) error {
+	return loadFromConfDir(c, c.pluginMaxConfNum)
+}
+
+// WithAllConf can be used to detect all network config
+// files from the configured cni config directory and load
+// them.
+func WithAllConf(c *libcni) error {
+	return loadFromConfDir(c, 0)
+}
+
+// loadFromConfDir detects network config files from the
+// configured cni config directory and loads them. max is
+// the maximum number of network configs to load (max <= 0 means no limit).
+func loadFromConfDir(c *libcni, max int) error {
+	files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"})
+	switch {
+	case err != nil:
+		return fmt.Errorf("failed to read config file: %v: %w", err, ErrRead)
+	case len(files) == 0:
+		return fmt.Errorf("no network config found in %s: %w", c.pluginConfDir, ErrCNINotInitialized)
+	}
+
+	// files contains the network config files associated with cni network.
+	// Use lexicographical way as a defined order for network config files.
+	sort.Strings(files)
+	// Since the CNI spec does not specify a way to detect default networks,
+	// the convention chosen is - the first network configuration in the sorted
+	// list of network conf files as the default network and choose the default
+	// interface provided during init as the network interface for this default
+	// network. For every other network use a generated interface id.
+	i := 0
+	var networks []*Network
+	for _, confFile := range files {
+		var confList *cnilibrary.NetworkConfigList
+		if strings.HasSuffix(confFile, ".conflist") {
+			confList, err = cnilibrary.ConfListFromFile(confFile)
+			if err != nil {
+				return fmt.Errorf("failed to load CNI config list file %s: %v: %w", confFile, err, ErrInvalidConfig)
+			}
+		} else {
+			conf, err := cnilibrary.ConfFromFile(confFile)
+			if err != nil {
+				return fmt.Errorf("failed to load CNI config file %s: %v: %w", confFile, err, ErrInvalidConfig)
+			}
+			// Ensure the config has a "type" so we know what plugin to run.
+			// Also catches the case where somebody put a conflist into a conf file.
+			if conf.Network.Type == "" {
+				return fmt.Errorf("network type not found in %s: %w", confFile, ErrInvalidConfig)
+			}
+
+			confList, err = cnilibrary.ConfListFromConf(conf)
+			if err != nil {
+				return fmt.Errorf("failed to convert CNI config file %s to CNI config list: %v: %w", confFile, err, ErrInvalidConfig)
+			}
+		}
+		if len(confList.Plugins) == 0 {
+			return fmt.Errorf("CNI config list in config file %s has no networks, skipping: %w", confFile, ErrInvalidConfig)
+
+		}
+		networks = append(networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		i++
+		if i == max {
+			break
+		}
+	}
+	if len(networks) == 0 {
+		return fmt.Errorf("no valid networks found in %s: %w", c.pluginDirs, ErrCNINotInitialized)
+	}
+	c.networks = append(c.networks, networks...)
+	return nil
+}
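
A minimal sketch of loading a network directly from bytes with WithConfListBytes instead of scanning /etc/cni/net.d; the conflist JSON is illustrative, and any valid conflist works.

```go
// Sketch only; the conflist below is illustrative.
package main

import (
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	conflist := []byte(`{
  "cniVersion": "0.4.0",
  "name": "example-net",
  "plugins": [{
    "type": "bridge",
    "bridge": "cni0",
    "ipam": { "type": "host-local", "subnet": "10.10.0.0/16" }
  }]
}`)

	l, err := gocni.New(gocni.WithPluginDir([]string{"/opt/cni/bin"}))
	if err != nil {
		log.Fatal(err)
	}

	// Interfaces are named <prefix><index>; as the first network appended,
	// this conflist is attached as eth0.
	if err := l.Load(gocni.WithConfListBytes(conflist)); err != nil {
		log.Fatal(err)
	}
}
```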

+ 114 - 0
vendor/github.com/containerd/go-cni/result.go

@@ -0,0 +1,114 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/containernetworking/cni/pkg/types"
+	types100 "github.com/containernetworking/cni/pkg/types/100"
+)
+
+type IPConfig struct {
+	IP      net.IP
+	Gateway net.IP
+}
+
+// Result contains the network information returned by CNI.Setup
+//
+// a) Interfaces list. Depending on the plugin, this can include the sandbox
+//    (eg, container or hypervisor) interface name and/or the host interface
+//    name, the hardware addresses of each interface, and details about the
+//    sandbox (if any) the interface is in.
+// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses,
+//    gateways, and routes assigned to sandbox and/or host interfaces.
+// c) DNS information. Dictionary that includes DNS information for nameservers,
+//     domain, search domains and options.
+type Result struct {
+	Interfaces map[string]*Config
+	DNS        []types.DNS
+	Routes     []*types.Route
+	raw        []*types100.Result
+}
+
+// Raw returns the raw CNI results of multiple networks.
+func (r *Result) Raw() []*types100.Result {
+	return r.raw
+}
+
+type Config struct {
+	IPConfigs []*IPConfig
+	Mac       string
+	Sandbox   string
+}
+
+// createResult creates a Result from the given slice of types100.Result, adding
+// structured data containing the interface configuration for each of the
+// interfaces created in the namespace. It returns an error if validation of
+// results fails, or if a network could not be found.
+func (c *libcni) createResult(results []*types100.Result) (*Result, error) {
+	c.RLock()
+	defer c.RUnlock()
+	r := &Result{
+		Interfaces: make(map[string]*Config),
+		raw:        results,
+	}
+
+	// Plugins may not need to return Interfaces in the result
+	// if multiple interfaces were not created. In that case
+	// all configs should be applied against the default interface.
+	r.Interfaces[defaultInterface(c.prefix)] = &Config{}
+
+	// Walk through all the results
+	for _, result := range results {
+		// Walk through all the interface in each result
+		for _, intf := range result.Interfaces {
+			r.Interfaces[intf.Name] = &Config{
+				Mac:     intf.Mac,
+				Sandbox: intf.Sandbox,
+			}
+		}
+		// Walk through all the IPs in the result and attach it to corresponding
+		// interfaces
+		for _, ipConf := range result.IPs {
+			if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil {
+				return nil, fmt.Errorf("invalid interface config: %v: %w", err, ErrInvalidResult)
+			}
+			name := c.getInterfaceName(result.Interfaces, ipConf)
+			r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs,
+				&IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway})
+		}
+		r.DNS = append(r.DNS, result.DNS)
+		r.Routes = append(r.Routes, result.Routes...)
+	}
+	if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok {
+		return nil, fmt.Errorf("default network not found for: %s: %w", defaultInterface(c.prefix), ErrNotFound)
+	}
+	return r, nil
+}
+
+// getInterfaceName returns the interface name if the plugins
+// return the result with associated interfaces. If the interface
+// is not present then the default interface name is used.
+func (c *libcni) getInterfaceName(interfaces []*types100.Interface,
+	ipConf *types100.IPConfig) string {
+	if ipConf.Interface != nil {
+		return interfaces[*ipConf.Interface].Name
+	}
+	return defaultInterface(c.prefix)
+}
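
A minimal sketch of consuming the Result returned by Setup; the container ID and netns path are placeholders, and the default interface is DefaultPrefix plus index 0, i.e. "eth0".

```go
// Sketch only; IDs and paths below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	l, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := l.Load(gocni.WithDefaultConf); err != nil {
		log.Fatal(err)
	}

	res, err := l.Setup(context.Background(), "container-1", "/var/run/netns/example")
	if err != nil {
		log.Fatal(err)
	}

	// Result.Interfaces maps interface name -> MAC, sandbox, and assigned IPs.
	for name, cfg := range res.Interfaces {
		for _, ip := range cfg.IPConfigs {
			fmt.Printf("%s (%s): %s via %s\n", name, cfg.Mac, ip.IP, ip.Gateway)
		}
	}

	// Raw exposes the underlying per-network types100.Result values.
	fmt.Println("networks attached:", len(res.Raw()))
}
```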

+ 78 - 0
vendor/github.com/containerd/go-cni/testutils.go

@@ -0,0 +1,78 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+)
+
+func makeTmpDir(prefix string) (string, error) {
+	tmpDir, err := ioutil.TempDir(os.TempDir(), prefix)
+	if err != nil {
+		return "", err
+	}
+	return tmpDir, nil
+}
+
+func makeFakeCNIConfig(t *testing.T) (string, string) {
+	cniDir, err := makeTmpDir("fakecni")
+	if err != nil {
+		t.Fatalf("Failed to create plugin config dir: %v", err)
+	}
+
+	cniConfDir := path.Join(cniDir, "net.d")
+	err = os.MkdirAll(cniConfDir, 0777)
+	if err != nil {
+		t.Fatalf("Failed to create network config dir: %v", err)
+	}
+
+	networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf")
+	f1, err := os.Create(networkConfig1)
+	if err != nil {
+		t.Fatalf("Failed to create network config %v: %v", f1, err)
+	}
+	networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf")
+	f2, err := os.Create(networkConfig2)
+	if err != nil {
+		t.Fatalf("Failed to create network config %v: %v", f2, err)
+	}
+
+	cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true}  }`, "plugin1", "fakecni")
+	_, err = f1.WriteString(cfg1)
+	if err != nil {
+		t.Fatalf("Failed to write network config file %v: %v", f1, err)
+	}
+	f1.Close()
+	cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true}  }`, "plugin2", "fakecni")
+	_, err = f2.WriteString(cfg2)
+	if err != nil {
+		t.Fatalf("Failed to write network config file %v: %v", f2, err)
+	}
+	f2.Close()
+	return cniDir, cniConfDir
+}
+
+func tearDownCNIConfig(t *testing.T, confDir string) {
+	err := os.RemoveAll(confDir)
+	if err != nil {
+		t.Fatalf("Failed to cleanup CNI configs: %v", err)
+	}
+}

+ 65 - 0
vendor/github.com/containerd/go-cni/types.go

@@ -0,0 +1,65 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+const (
+	CNIPluginName        = "cni"
+	DefaultNetDir        = "/etc/cni/net.d"
+	DefaultCNIDir        = "/opt/cni/bin"
+	DefaultMaxConfNum    = 1
+	VendorCNIDirTemplate = "%s/opt/%s/bin"
+	DefaultPrefix        = "eth"
+)
+
+type config struct {
+	pluginDirs       []string
+	pluginConfDir    string
+	pluginMaxConfNum int
+	prefix           string
+}
+
+type PortMapping struct {
+	HostPort      int32
+	ContainerPort int32
+	Protocol      string
+	HostIP        string
+}
+
+type IPRanges struct {
+	Subnet     string
+	RangeStart string
+	RangeEnd   string
+	Gateway    string
+}
+
+// BandWidth defines the ingress/egress rate and burst limits
+type BandWidth struct {
+	IngressRate  uint64
+	IngressBurst uint64
+	EgressRate   uint64
+	EgressBurst  uint64
+}
+
+// DNS defines the DNS config
+type DNS struct {
+	// List of DNS servers of the cluster.
+	Servers []string
+	// List of DNS search domains of the cluster.
+	Searches []string
+	// List of DNS options.
+	Options []string
+}

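The capability types above are plain data carriers. A hedged sketch of how they might be packaged as capability arguments; the "portMappings" key matches the capability advertised by the fake configs in testutils.go, while the "bandwidth" key is assumed for illustration:

package main

import (
	"encoding/json"
	"fmt"

	gocni "github.com/containerd/go-cni"
)

func main() {
	// Capability arguments keyed by capability name. How these reach a plugin
	// is handled by libcni's runtimeConfig injection (see libcni/api.go below);
	// here they are only printed.
	capArgs := map[string]interface{}{
		"portMappings": []gocni.PortMapping{
			{HostPort: 8080, ContainerPort: 80, Protocol: "tcp", HostIP: "0.0.0.0"},
		},
		"bandwidth": gocni.BandWidth{IngressRate: 1 << 20, IngressBurst: 2 << 20},
	}

	b, err := json.MarshalIndent(capArgs, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
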
+ 202 - 0
vendor/github.com/containernetworking/cni/LICENSE

@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

+ 679 - 0
vendor/github.com/containernetworking/cni/libcni/api.go

@@ -0,0 +1,679 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+// Note this is the actual implementation of the CNI specification, which
+// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file.
+// It is typically bundled into runtime providers (e.g. containerd or cri-o would use this
+// before calling runc or hcsshim). It is also bundled into CNI providers, for example,
+// to add an IP to a container, to parse the configuration of the CNI and so on.
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/create"
+	"github.com/containernetworking/cni/pkg/utils"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+var (
+	CacheDir = "/var/lib/cni"
+)
+
+const (
+	CNICacheV1 = "cniCacheV1"
+)
+
+// A RuntimeConf holds the arguments to one invocation of a CNI plugin
+// excepting the network configuration, with the nested exception that
+// the `runtimeConfig` from the network configuration is included
+// here.
+type RuntimeConf struct {
+	ContainerID string
+	NetNS       string
+	IfName      string
+	Args        [][2]string
+	// A dictionary of capability-specific data passed by the runtime
+	// to plugins as top-level keys in the 'runtimeConfig' dictionary
+	// of the plugin's stdin data.  libcni will ensure that only keys
+	// in this map which match the capabilities of the plugin are passed
+	// to the plugin
+	CapabilityArgs map[string]interface{}
+
+	// DEPRECATED. Will be removed in a future release.
+	CacheDir string
+}
+
+type NetworkConfig struct {
+	Network *types.NetConf
+	Bytes   []byte
+}
+
+type NetworkConfigList struct {
+	Name         string
+	CNIVersion   string
+	DisableCheck bool
+	Plugins      []*NetworkConfig
+	Bytes        []byte
+}
+
+type CNI interface {
+	AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+	CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+	GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+	AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+	CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+	GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+	ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+	ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+}
+
+type CNIConfig struct {
+	Path     []string
+	exec     invoke.Exec
+	cacheDir string
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+// NewCNIConfig returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+	return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
+	return &CNIConfig{
+		Path:     path,
+		cacheDir: cacheDir,
+		exec:     exec,
+	}
+}
+
+func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+	var err error
+
+	inject := map[string]interface{}{
+		"name":       name,
+		"cniVersion": cniVersion,
+	}
+	// Add previous plugin result
+	if prevResult != nil {
+		inject["prevResult"] = prevResult
+	}
+
+	// Ensure every config uses the same name and version
+	orig, err = InjectConf(orig, inject)
+	if err != nil {
+		return nil, err
+	}
+
+	return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised
+// capabilities from its config JSON, and any keys in the CapabilityArgs
+// matching plugin capabilities are added to the "runtimeConfig" dictionary
+// sent to the plugin via JSON on stdin.  For example, if the plugin's
+// capabilities include "portMappings", and the CapabilityArgs map includes a
+// "portMappings" key, that key and its value are added to the "runtimeConfig"
+// dictionary to be passed to the plugin's stdin.
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
+	var err error
+
+	rc := make(map[string]interface{})
+	for capability, supported := range orig.Network.Capabilities {
+		if !supported {
+			continue
+		}
+		if data, ok := rt.CapabilityArgs[capability]; ok {
+			rc[capability] = data
+		}
+	}
+
+	if len(rc) > 0 {
+		orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return orig, nil
+}
+
+// ensure we have a usable exec if the CNIConfig was not given one
+func (c *CNIConfig) ensureExec() invoke.Exec {
+	if c.exec == nil {
+		c.exec = &invoke.DefaultExec{
+			RawExec:       &invoke.RawExec{Stderr: os.Stderr},
+			PluginDecoder: version.PluginDecoder{},
+		}
+	}
+	return c.exec
+}
+
+type cachedInfo struct {
+	Kind           string                 `json:"kind"`
+	ContainerID    string                 `json:"containerId"`
+	Config         []byte                 `json:"config"`
+	IfName         string                 `json:"ifName"`
+	NetworkName    string                 `json:"networkName"`
+	CniArgs        [][2]string            `json:"cniArgs,omitempty"`
+	CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
+	RawResult      map[string]interface{} `json:"result,omitempty"`
+	Result         types.Result           `json:"-"`
+}
+
+// getCacheDir returns the cache directory in this order:
+// 1) global cacheDir from CNIConfig object
+// 2) deprecated cacheDir from RuntimeConf object
+// 3) fall back to default cache directory
+func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string {
+	if c.cacheDir != "" {
+		return c.cacheDir
+	}
+	if rt.CacheDir != "" {
+		return rt.CacheDir
+	}
+	return CacheDir
+}
+
+func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) {
+	if netName == "" || rt.ContainerID == "" || rt.IfName == "" {
+		return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName)
+	}
+	return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil
+}
+
+func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error {
+	cached := cachedInfo{
+		Kind:           CNICacheV1,
+		ContainerID:    rt.ContainerID,
+		Config:         config,
+		IfName:         rt.IfName,
+		NetworkName:    netName,
+		CniArgs:        rt.Args,
+		CapabilityArgs: rt.CapabilityArgs,
+	}
+
+	// We need to get type.Result into cachedInfo as JSON map
+	// Marshal to []byte, then Unmarshal into cached.RawResult
+	data, err := json.Marshal(result)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(data, &cached.RawResult)
+	if err != nil {
+		return err
+	}
+
+	newBytes, err := json.Marshal(&cached)
+	if err != nil {
+		return err
+	}
+
+	fname, err := c.getCacheFilePath(netName, rt)
+	if err != nil {
+		return err
+	}
+	if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(fname, newBytes, 0600)
+}
+
+func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error {
+	fname, err := c.getCacheFilePath(netName, rt)
+	if err != nil {
+		// Ignore error
+		return nil
+	}
+	return os.Remove(fname)
+}
+
+func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+	var bytes []byte
+
+	fname, err := c.getCacheFilePath(netName, rt)
+	if err != nil {
+		return nil, nil, err
+	}
+	bytes, err = ioutil.ReadFile(fname)
+	if err != nil {
+		// Ignore read errors; the cached result may not exist on-disk
+		return nil, nil, nil
+	}
+
+	unmarshaled := cachedInfo{}
+	if err := json.Unmarshal(bytes, &unmarshaled); err != nil {
+		return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err)
+	}
+	if unmarshaled.Kind != CNICacheV1 {
+		return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind)
+	}
+
+	newRt := *rt
+	if unmarshaled.CniArgs != nil {
+		newRt.Args = unmarshaled.CniArgs
+	}
+	newRt.CapabilityArgs = unmarshaled.CapabilityArgs
+
+	return unmarshaled.Config, &newRt, nil
+}
+
+func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+	fname, err := c.getCacheFilePath(netName, rt)
+	if err != nil {
+		return nil, err
+	}
+	data, err := ioutil.ReadFile(fname)
+	if err != nil {
+		// Ignore read errors; the cached result may not exist on-disk
+		return nil, nil
+	}
+
+	// Load the cached result
+	result, err := create.CreateFromBytes(data)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert to the config version to ensure plugins get prevResult
+	// in the same version as the config.  The cached result version
+	// should match the config version unless the config was changed
+	// while the container was running.
+	result, err = result.GetAsVersion(cniVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err)
+	}
+	return result, nil
+}
+
+func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+	fname, err := c.getCacheFilePath(netName, rt)
+	if err != nil {
+		return nil, err
+	}
+	fdata, err := ioutil.ReadFile(fname)
+	if err != nil {
+		// Ignore read errors; the cached result may not exist on-disk
+		return nil, nil
+	}
+
+	cachedInfo := cachedInfo{}
+	if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 {
+		return c.getLegacyCachedResult(netName, cniVersion, rt)
+	}
+
+	newBytes, err := json.Marshal(&cachedInfo.RawResult)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err)
+	}
+
+	// Load the cached result
+	result, err := create.CreateFromBytes(newBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert to the config version to ensure plugins get prevResult
+	// in the same version as the config.  The cached result version
+	// should match the config version unless the config was changed
+	// while the container was running.
+	result, err = result.GetAsVersion(cniVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err)
+	}
+	return result, nil
+}
+
+// GetNetworkListCachedResult returns the cached Result of the previous
+// AddNetworkList() operation for a network list, or an error.
+func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+	return c.getCachedResult(list.Name, list.CNIVersion, rt)
+}
+
+// GetNetworkCachedResult returns the cached Result of the previous
+// AddNetwork() operation for a network, or an error.
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+	return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+// GetNetworkListCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+	return c.getCachedConfig(list.Name, rt)
+}
+
+// GetNetworkCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+	return c.getCachedConfig(net.Network.Name, rt)
+}
+
+func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+	if err != nil {
+		return nil, err
+	}
+	if err := utils.ValidateContainerID(rt.ContainerID); err != nil {
+		return nil, err
+	}
+	if err := utils.ValidateNetworkName(name); err != nil {
+		return nil, err
+	}
+	if err := utils.ValidateInterfaceName(rt.IfName); err != nil {
+		return nil, err
+	}
+
+	newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+	if err != nil {
+		return nil, err
+	}
+
+	return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
+}
+
+// AddNetworkList executes a sequence of plugins with the ADD command
+func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+	var err error
+	var result types.Result
+	for _, net := range list.Plugins {
+		result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
+		if err != nil {
+			return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err)
+		}
+	}
+
+	if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil {
+		return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err)
+	}
+
+	return result, nil
+}
+
+func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+	if err != nil {
+		return err
+	}
+
+	newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+	if err != nil {
+		return err
+	}
+
+	return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
+}
+
+// CheckNetworkList executes a sequence of plugins with the CHECK command
+func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+	// CHECK was added in CNI spec version 0.4.0 and higher
+	if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
+		return err
+	} else if !gtet {
+		return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
+	}
+
+	if list.DisableCheck {
+		return nil
+	}
+
+	cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt)
+	if err != nil {
+		return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err)
+	}
+
+	for _, net := range list.Plugins {
+		if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+	if err != nil {
+		return err
+	}
+
+	newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+	if err != nil {
+		return err
+	}
+
+	return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
+}
+
+// DelNetworkList executes a sequence of plugins with the DEL command
+func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+	var cachedResult types.Result
+
+	// Cached result on DEL was added in CNI spec version 0.4.0 and higher
+	if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
+		return err
+	} else if gtet {
+		cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt)
+		if err != nil {
+			return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err)
+		}
+	}
+
+	for i := len(list.Plugins) - 1; i >= 0; i-- {
+		net := list.Plugins[i]
+		if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+			return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err)
+		}
+	}
+	_ = c.cacheDel(list.Name, rt)
+
+	return nil
+}
+
+func pluginDescription(net *types.NetConf) string {
+	if net == nil {
+		return "<missing>"
+	}
+	pluginType := net.Type
+	out := fmt.Sprintf("type=%q", pluginType)
+	name := net.Name
+	if name != "" {
+		out += fmt.Sprintf(" name=%q", name)
+	}
+	return out
+}
+
+// AddNetwork executes the plugin with the ADD command
+func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+	result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil {
+		return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err)
+	}
+
+	return result, nil
+}
+
+// CheckNetwork executes the plugin with the CHECK command
+func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
+	// CHECK was added in CNI spec version 0.4.0 and higher
+	if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
+		return err
+	} else if !gtet {
+		return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
+	}
+
+	cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+	if err != nil {
+		return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err)
+	}
+	return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
+}
+
+// DelNetwork executes the plugin with the DEL command
+func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
+	var cachedResult types.Result
+
+	// Cached result on DEL was added in CNI spec version 0.4.0 and higher
+	if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
+		return err
+	} else if gtet {
+		cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+		if err != nil {
+			return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err)
+		}
+	}
+
+	if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
+		return err
+	}
+	_ = c.cacheDel(net.Network.Name, rt)
+	return nil
+}
+
+// ValidateNetworkList checks that a configuration is reasonably valid.
+// - all the specified plugins exist on disk
+// - every plugin supports the desired version.
+//
+// Returns a list of all capabilities supported by the configuration, or an error
+func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
+	version := list.CNIVersion
+
+	// holding map for seen caps (in case of duplicates)
+	caps := map[string]interface{}{}
+
+	errs := []error{}
+	for _, net := range list.Plugins {
+		if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
+			errs = append(errs, err)
+		}
+		for c, enabled := range net.Network.Capabilities {
+			if !enabled {
+				continue
+			}
+			caps[c] = struct{}{}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, fmt.Errorf("%v", errs)
+	}
+
+	// make caps list
+	cc := make([]string, 0, len(caps))
+	for c := range caps {
+		cc = append(cc, c)
+	}
+
+	return cc, nil
+}
+
+// ValidateNetwork checks that a configuration is reasonably valid.
+// It uses the same logic as ValidateNetworkList().
+// Returns a list of capabilities.
+func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
+	caps := []string{}
+	for c, ok := range net.Network.Capabilities {
+		if ok {
+			caps = append(caps, c)
+		}
+	}
+	if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
+		return nil, err
+	}
+	return caps, nil
+}
+
+// validatePlugin checks that an individual plugin's configuration is sane
+func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(pluginName, c.Path)
+	if err != nil {
+		return err
+	}
+	if expectedVersion == "" {
+		expectedVersion = "0.1.0"
+	}
+
+	vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+	if err != nil {
+		return err
+	}
+	for _, vers := range vi.SupportedVersions() {
+		if vers == expectedVersion {
+			return nil
+		}
+	}
+	return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
+}
+
+// GetVersionInfo reports which versions of the CNI spec are supported by
+// the given plugin.
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+}
+
+// =====
+func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
+	return &invoke.Args{
+		Command:     action,
+		ContainerID: rt.ContainerID,
+		NetNS:       rt.NetNS,
+		PluginArgs:  rt.Args,
+		IfName:      rt.IfName,
+		Path:        strings.Join(c.Path, string(os.PathListSeparator)),
+	}
+}

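Taken together, the libcni API added above is typically driven in a load-config, AddNetworkList, DelNetworkList sequence. A hedged sketch under assumed paths and names ("mynet", /etc/cni/net.d, /opt/cni/bin, the container and netns identifiers):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	// nil Exec means CNIConfig falls back to its default exec handler.
	cninet := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)

	list, err := libcni.LoadConfList("/etc/cni/net.d", "mynet")
	if err != nil {
		log.Fatal(err)
	}

	rt := &libcni.RuntimeConf{
		ContainerID: "example-ctr",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
		CapabilityArgs: map[string]interface{}{
			"portMappings": []map[string]interface{}{
				{"hostPort": 8080, "containerPort": 80, "protocol": "tcp"},
			},
		},
	}

	// AddNetworkList runs every plugin in the list with ADD and caches the result.
	result, err := cninet.AddNetworkList(context.Background(), list, rt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)

	// DelNetworkList replays the plugins in reverse order with DEL, using the
	// cached result where the config version supports it.
	if err := cninet.DelNetworkList(context.Background(), list, rt); err != nil {
		log.Fatal(err)
	}
}
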
+ 270 - 0
vendor/github.com/containernetworking/cni/libcni/conf.go

@@ -0,0 +1,270 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+type NotFoundError struct {
+	Dir  string
+	Name string
+}
+
+func (e NotFoundError) Error() string {
+	return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir)
+}
+
+type NoConfigsFoundError struct {
+	Dir string
+}
+
+func (e NoConfigsFoundError) Error() string {
+	return fmt.Sprintf(`no net configurations found in %s`, e.Dir)
+}
+
+func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
+	conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}}
+	if err := json.Unmarshal(bytes, conf.Network); err != nil {
+		return nil, fmt.Errorf("error parsing configuration: %w", err)
+	}
+	if conf.Network.Type == "" {
+		return nil, fmt.Errorf("error parsing configuration: missing 'type'")
+	}
+	return conf, nil
+}
+
+func ConfFromFile(filename string) (*NetworkConfig, error) {
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("error reading %s: %w", filename, err)
+	}
+	return ConfFromBytes(bytes)
+}
+
+func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
+	rawList := make(map[string]interface{})
+	if err := json.Unmarshal(bytes, &rawList); err != nil {
+		return nil, fmt.Errorf("error parsing configuration list: %w", err)
+	}
+
+	rawName, ok := rawList["name"]
+	if !ok {
+		return nil, fmt.Errorf("error parsing configuration list: no name")
+	}
+	name, ok := rawName.(string)
+	if !ok {
+		return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName)
+	}
+
+	var cniVersion string
+	rawVersion, ok := rawList["cniVersion"]
+	if ok {
+		cniVersion, ok = rawVersion.(string)
+		if !ok {
+			return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion)
+		}
+	}
+
+	disableCheck := false
+	if rawDisableCheck, ok := rawList["disableCheck"]; ok {
+		disableCheck, ok = rawDisableCheck.(bool)
+		if !ok {
+			return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
+		}
+	}
+
+	list := &NetworkConfigList{
+		Name:         name,
+		DisableCheck: disableCheck,
+		CNIVersion:   cniVersion,
+		Bytes:        bytes,
+	}
+
+	var plugins []interface{}
+	plug, ok := rawList["plugins"]
+	if !ok {
+		return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key")
+	}
+	plugins, ok = plug.([]interface{})
+	if !ok {
+		return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug)
+	}
+	if len(plugins) == 0 {
+		return nil, fmt.Errorf("error parsing configuration list: no plugins in list")
+	}
+
+	for i, conf := range plugins {
+		newBytes, err := json.Marshal(conf)
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err)
+		}
+		netConf, err := ConfFromBytes(newBytes)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err)
+		}
+		list.Plugins = append(list.Plugins, netConf)
+	}
+
+	return list, nil
+}
+
+func ConfListFromFile(filename string) (*NetworkConfigList, error) {
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("error reading %s: %w", filename, err)
+	}
+	return ConfListFromBytes(bytes)
+}
+
+func ConfFiles(dir string, extensions []string) ([]string, error) {
+	// In part, adapted from rkt/networking/podenv.go#listFiles
+	files, err := ioutil.ReadDir(dir)
+	switch {
+	case err == nil: // break
+	case os.IsNotExist(err):
+		return nil, nil
+	default:
+		return nil, err
+	}
+
+	confFiles := []string{}
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		fileExt := filepath.Ext(f.Name())
+		for _, ext := range extensions {
+			if fileExt == ext {
+				confFiles = append(confFiles, filepath.Join(dir, f.Name()))
+			}
+		}
+	}
+	return confFiles, nil
+}
+
+func LoadConf(dir, name string) (*NetworkConfig, error) {
+	files, err := ConfFiles(dir, []string{".conf", ".json"})
+	switch {
+	case err != nil:
+		return nil, err
+	case len(files) == 0:
+		return nil, NoConfigsFoundError{Dir: dir}
+	}
+	sort.Strings(files)
+
+	for _, confFile := range files {
+		conf, err := ConfFromFile(confFile)
+		if err != nil {
+			return nil, err
+		}
+		if conf.Network.Name == name {
+			return conf, nil
+		}
+	}
+	return nil, NotFoundError{dir, name}
+}
+
+func LoadConfList(dir, name string) (*NetworkConfigList, error) {
+	files, err := ConfFiles(dir, []string{".conflist"})
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(files)
+
+	for _, confFile := range files {
+		conf, err := ConfListFromFile(confFile)
+		if err != nil {
+			return nil, err
+		}
+		if conf.Name == name {
+			return conf, nil
+		}
+	}
+
+	// Try and load a network configuration file (instead of list)
+	// from the same name, then upconvert.
+	singleConf, err := LoadConf(dir, name)
+	if err != nil {
+		// A little extra logic so the error makes sense
+		if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok {
+			// Config lists found but no config files found
+			return nil, NotFoundError{dir, name}
+		}
+
+		return nil, err
+	}
+	return ConfListFromConf(singleConf)
+}
+
+func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) {
+	config := make(map[string]interface{})
+	err := json.Unmarshal(original.Bytes, &config)
+	if err != nil {
+		return nil, fmt.Errorf("unmarshal existing network bytes: %w", err)
+	}
+
+	for key, value := range newValues {
+		if key == "" {
+			return nil, fmt.Errorf("keys cannot be empty")
+		}
+
+		if value == nil {
+			return nil, fmt.Errorf("key '%s' value must not be nil", key)
+		}
+
+		config[key] = value
+	}
+
+	newBytes, err := json.Marshal(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return ConfFromBytes(newBytes)
+}
+
+// ConfListFromConf "upconverts" a network config in to a NetworkConfigList,
+// with the single network as the only entry in the list.
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) {
+	// Re-deserialize the config's json, then make a raw map configlist.
+	// This may seem a bit strange, but it's to make the Bytes fields
+	// actually make sense. Otherwise, the generated json is littered with
+	// golang default values.
+
+	rawConfig := make(map[string]interface{})
+	if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil {
+		return nil, err
+	}
+
+	rawConfigList := map[string]interface{}{
+		"name":       original.Network.Name,
+		"cniVersion": original.Network.CNIVersion,
+		"plugins":    []interface{}{rawConfig},
+	}
+
+	b, err := json.Marshal(rawConfigList)
+	if err != nil {
+		return nil, err
+	}
+	return ConfListFromBytes(b)
+}

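A short sketch of the single-conf handling in conf.go above: parsing a config, injecting extra keys, and upconverting it to a one-element list the way LoadConfList does when only a .conf file exists. The bridge/host-local config is illustrative, not taken from the diff:

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/libcni"
)

func main() {
	raw := []byte(`{"cniVersion":"0.4.0","name":"mynet","type":"bridge"}`)

	conf, err := libcni.ConfFromBytes(raw)
	if err != nil {
		log.Fatal(err)
	}

	// InjectConf adds or overrides top-level keys in the config JSON.
	conf, err = libcni.InjectConf(conf, map[string]interface{}{
		"ipam": map[string]interface{}{"type": "host-local"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// ConfListFromConf wraps the single config as the only plugin in a list.
	list, err := libcni.ConfListFromConf(conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("list %q with %d plugin(s), cniVersion %s\n", list.Name, len(list.Plugins), list.CNIVersion)
}
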
+ 128 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/args.go

@@ -0,0 +1,128 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+type CNIArgs interface {
+	// For use with os/exec; i.e., return nil to inherit the
+	// environment from this process
+	// For use in delegation; inherit the environment from this
+	// process and allow overrides
+	AsEnv() []string
+}
+
+type inherited struct{}
+
+var inheritArgsFromEnv inherited
+
+func (*inherited) AsEnv() []string {
+	return nil
+}
+
+func ArgsFromEnv() CNIArgs {
+	return &inheritArgsFromEnv
+}
+
+type Args struct {
+	Command       string
+	ContainerID   string
+	NetNS         string
+	PluginArgs    [][2]string
+	PluginArgsStr string
+	IfName        string
+	Path          string
+}
+
+// Args implements the CNIArgs interface
+var _ CNIArgs = &Args{}
+
+func (args *Args) AsEnv() []string {
+	env := os.Environ()
+	pluginArgsStr := args.PluginArgsStr
+	if pluginArgsStr == "" {
+		pluginArgsStr = stringify(args.PluginArgs)
+	}
+
+	// Duplicated values which come first will be overridden, so we must put the
+	// custom values at the end so they are not overridden by the process environment.
+	env = append(env,
+		"CNI_COMMAND="+args.Command,
+		"CNI_CONTAINERID="+args.ContainerID,
+		"CNI_NETNS="+args.NetNS,
+		"CNI_ARGS="+pluginArgsStr,
+		"CNI_IFNAME="+args.IfName,
+		"CNI_PATH="+args.Path,
+	)
+	return dedupEnv(env)
+}
+
+// taken from rkt/networking/net_plugin.go
+func stringify(pluginArgs [][2]string) string {
+	entries := make([]string, len(pluginArgs))
+
+	for i, kv := range pluginArgs {
+		entries[i] = strings.Join(kv[:], "=")
+	}
+
+	return strings.Join(entries, ";")
+}
+
+// DelegateArgs implements the CNIArgs interface. It is used for delegation
+// to inherit the environment from the calling process and to allow some
+// overrides like CNI_COMMAND.
+var _ CNIArgs = &DelegateArgs{}
+
+type DelegateArgs struct {
+	Command string
+}
+
+func (d *DelegateArgs) AsEnv() []string {
+	env := os.Environ()
+
+	// The custom values should come in the end to override the existing
+	// process environment of the same key.
+	env = append(env,
+		"CNI_COMMAND="+d.Command,
+	)
+	return dedupEnv(env)
+}
+
+// dedupEnv returns a copy of env with any duplicates removed, in favor of later values.
+// Items not of the normal environment "key=value" form are preserved unchanged.
+func dedupEnv(env []string) []string {
+	out := make([]string, 0, len(env))
+	envMap := map[string]string{}
+
+	for _, kv := range env {
+		// find the first "=" in environment, if not, just keep it
+		eq := strings.Index(kv, "=")
+		if eq < 0 {
+			out = append(out, kv)
+			continue
+		}
+		envMap[kv[:eq]] = kv[eq+1:]
+	}
+
+	for k, v := range envMap {
+		out = append(out, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	return out
+}

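The override semantics described above (custom CNI_* values appended last, then deduplicated in their favor) can be seen with a small sketch; the field values are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	args := &invoke.Args{
		Command:     "ADD",
		ContainerID: "example-ctr",
		NetNS:       "/var/run/netns/example",
		IfName:      "eth0",
		Path:        "/opt/cni/bin",
		PluginArgs:  [][2]string{{"K8S_POD_NAME", "web-0"}},
	}

	// AsEnv returns the process environment plus the CNI_* variables above.
	for _, kv := range args.AsEnv() {
		if strings.HasPrefix(kv, "CNI_") {
			fmt.Println(kv)
		}
	}
}
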
+ 80 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go

@@ -0,0 +1,80 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) {
+	if exec == nil {
+		exec = defaultExec
+	}
+
+	paths := filepath.SplitList(os.Getenv("CNI_PATH"))
+	pluginPath, err := exec.FindInPath(delegatePlugin, paths)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return pluginPath, exec, nil
+}
+
+// DelegateAdd calls the given delegate plugin with the CNI ADD action and
+// JSON configuration
+func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
+	pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+	if err != nil {
+		return nil, err
+	}
+
+	// DelegateAdd will override the original "CNI_COMMAND" env from process with ADD
+	return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec)
+}
+
+// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
+// JSON configuration
+func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+	pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+	if err != nil {
+		return err
+	}
+
+	// DelegateCheck will override the original CNI_COMMAND env from process with CHECK
+	return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec)
+}
+
+// DelegateDel calls the given delegate plugin with the CNI DEL action and
+// JSON configuration
+func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+	pluginPath, realExec, err := delegateCommon(delegatePlugin, exec)
+	if err != nil {
+		return err
+	}
+
+	// DelegateDel will override the original CNI_COMMAND env from process with DEL
+	return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec)
+}
+
+// return CNIArgs used by delegation
+func delegateArgs(action string) *DelegateArgs {
+	return &DelegateArgs{
+		Command: action,
+	}
+}

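The delegate helpers above are what one plugin uses to call another (most commonly an IPAM plugin) with its own stdin config. A hedged sketch, assuming an installed "host-local" plugin and that the usual CNI_* variables (CNI_PATH, CNI_CONTAINERID, CNI_NETNS, ...) are already set in the calling plugin's environment:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Illustrative parent-plugin config with an ipam section; the delegate
	// plugin name and subnet are assumptions.
	netconf := []byte(`{
		"cniVersion": "0.4.0",
		"name": "mynet",
		"type": "bridge",
		"ipam": {"type": "host-local", "subnet": "10.10.0.0/16"}
	}`)

	// DelegateAdd resolves "host-local" via CNI_PATH and forces CNI_COMMAND=ADD.
	result, err := invoke.DelegateAdd(context.Background(), "host-local", netconf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)

	// DelegateDel releases the allocation again with CNI_COMMAND=DEL.
	if err := invoke.DelegateDel(context.Background(), "host-local", netconf, nil); err != nil {
		log.Fatal(err)
	}
}
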
+ 181 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/exec.go

@@ -0,0 +1,181 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/create"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+// Exec is an interface that encapsulates all operations that deal with finding
+// and executing a CNI plugin. Tests may provide a fake implementation
+// to avoid writing fake plugins to temporary directories during the test.
+type Exec interface {
+	ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+	FindInPath(plugin string, paths []string) (string, error)
+	Decode(jsonBytes []byte) (version.PluginInfo, error)
+}
+
+// A plugin must return a result in the same version as specified in the netconf;
+// but for backwards-compatibility reasons, if the result version is empty, use
+// the config version (rather than the technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+	versionDecoder := &version.ConfigDecoder{}
+	confVersion, err := versionDecoder.Decode(netconf)
+	if err != nil {
+		return "", nil, err
+	}
+
+	var rawResult map[string]interface{}
+	if err := json.Unmarshal(result, &rawResult); err != nil {
+		return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+	}
+
+	// Manually decode Result version; we need to know whether its cniVersion
+	// is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+	// empty version per the CNI spec.
+	if resultVerRaw, ok := rawResult["cniVersion"]; ok {
+		resultVer, ok := resultVerRaw.(string)
+		if ok && resultVer != "" {
+			return resultVer, result, nil
+		}
+	}
+
+	// If the cniVersion is not present or empty, assume the result is
+	// the same CNI spec version as the config
+	rawResult["cniVersion"] = confVersion
+	newBytes, err := json.Marshal(rawResult)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err)
+	}
+
+	return confVersion, newBytes, nil
+}
+
+// For example, a testcase could pass an instance of the following fakeExec
+// object to ExecPluginWithResult() to verify the incoming stdin and environment
+// and provide a tailored response:
+//
+//import (
+//	"encoding/json"
+//	"path"
+//	"strings"
+//)
+//
+//type fakeExec struct {
+//	version.PluginDecoder
+//}
+//
+//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+//	net := &types.NetConf{}
+//	err := json.Unmarshal(stdinData, net)
+//	if err != nil {
+//		return nil, fmt.Errorf("failed to unmarshal configuration: %v", err)
+//	}
+//	pluginName := path.Base(pluginPath)
+//	if pluginName != net.Type {
+//		return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type)
+//	}
+//	for _, e := range environ {
+//		// Check environment for forced failure request
+//		parts := strings.Split(e, "=")
+//		if len(parts) > 0 && parts[0] == "FAIL" {
+//			return nil, fmt.Errorf("failed to execute plugin %s", pluginName)
+//		}
+//	}
+//	return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil
+//}
+//
+//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
+//	if len(paths) > 0 {
+//		return path.Join(paths[0], plugin), nil
+//	}
+//	return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
+//}
+
+func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
+	if exec == nil {
+		exec = defaultExec
+	}
+
+	stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+	if err != nil {
+		return nil, err
+	}
+
+	resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return create.Create(resultVersion, fixedBytes)
+}
+
+func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
+	if exec == nil {
+		exec = defaultExec
+	}
+	_, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+	return err
+}
+
+// GetVersionInfo returns the version information available about the plugin.
+// For recent-enough plugins, it uses the information returned by the VERSION
+// command. For older plugins that do not recognize that command, it reports
+// version 0.1.0.
+func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
+	if exec == nil {
+		exec = defaultExec
+	}
+	args := &Args{
+		Command: "VERSION",
+
+		// set fake values required by plugins built against an older version of skel
+		NetNS:  "dummy",
+		IfName: "dummy",
+		Path:   "dummy",
+	}
+	stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
+	stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
+	if err != nil {
+		if err.Error() == "unknown CNI_COMMAND: VERSION" {
+			return version.PluginSupports("0.1.0"), nil
+		}
+		return nil, err
+	}
+
+	return exec.Decode(stdoutBytes)
+}
+
+// DefaultExec is an object that implements the Exec interface which looks
+// for and executes plugins from disk.
+type DefaultExec struct {
+	*RawExec
+	version.PluginDecoder
+}
+
+// DefaultExec implements the Exec interface
+var _ Exec = &DefaultExec{}
+
+var defaultExec = &DefaultExec{
+	RawExec: &RawExec{Stderr: os.Stderr},
+}
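
For orientation, here is a minimal sketch of how a caller might probe a plugin's supported spec versions through GetVersionInfo; the plugin path below is a hypothetical example, and passing a nil Exec falls back to the package's defaultExec.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Hypothetical plugin location; adjust to wherever CNI plugins are installed.
	const pluginPath = "/opt/cni/bin/bridge"

	// Passing nil for the Exec argument selects the package's DefaultExec,
	// which runs the binary and decodes its VERSION output.
	info, err := invoke.GetVersionInfo(context.Background(), pluginPath, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("plugin supports CNI versions:", info.SupportedVersions())
}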

+ 48 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/find.go

@@ -0,0 +1,48 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// FindInPath returns the full path of the plugin by searching in the provided path
+func FindInPath(plugin string, paths []string) (string, error) {
+	if plugin == "" {
+		return "", fmt.Errorf("no plugin name provided")
+	}
+
+	if strings.ContainsRune(plugin, os.PathSeparator) {
+		return "", fmt.Errorf("invalid plugin name: %s", plugin)
+	}
+
+	if len(paths) == 0 {
+		return "", fmt.Errorf("no paths provided")
+	}
+
+	for _, path := range paths {
+		for _, fe := range ExecutableFileExtensions {
+			fullpath := filepath.Join(path, plugin) + fe
+			if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+				return fullpath, nil
+			}
+		}
+	}
+
+	return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
+}
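
A short usage sketch for FindInPath; the plugin name and search directories are illustrative, not values used anywhere in this change.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// On Windows the same call also tries the ".exe" extension, via
	// ExecutableFileExtensions (see os_windows.go below).
	fullPath, err := invoke.FindInPath("bridge", []string{"/opt/cni/bin", "/usr/libexec/cni"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved plugin:", fullPath)
}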

+ 20 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go

@@ -0,0 +1,20 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{""}

+ 18 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go

@@ -0,0 +1,18 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+// Valid file extensions for plugin executables.
+var ExecutableFileExtensions = []string{".exe", ""}

+ 88 - 0
vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go

@@ -0,0 +1,88 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+type RawExec struct {
+	Stderr io.Writer
+}
+
+func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+	stdout := &bytes.Buffer{}
+	stderr := &bytes.Buffer{}
+	c := exec.CommandContext(ctx, pluginPath)
+	c.Env = environ
+	c.Stdin = bytes.NewBuffer(stdinData)
+	c.Stdout = stdout
+	c.Stderr = stderr
+
+	// Retry the command on "text file busy" errors
+	for i := 0; i <= 5; i++ {
+		err := c.Run()
+
+		// Command succeeded
+		if err == nil {
+			break
+		}
+
+		// If the plugin binary is still being written to disk, wait a
+		// second and try again
+		if strings.Contains(err.Error(), "text file busy") {
+			time.Sleep(time.Second)
+			continue
+		}
+
+		// Return any error other than the transient "text file busy"
+		return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes())
+	}
+
+	// Copy stderr to caller's buffer in case plugin printed to both
+	// stdout and stderr for some reason. Ignore failures as stderr is
+	// only informational.
+	if e.Stderr != nil && stderr.Len() > 0 {
+		_, _ = stderr.WriteTo(e.Stderr)
+	}
+	return stdout.Bytes(), nil
+}
+
+func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error {
+	emsg := types.Error{}
+	if len(stdout) == 0 {
+		if len(stderr) == 0 {
+			emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err)
+		} else {
+			emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr))
+		}
+	} else if perr := json.Unmarshal(stdout, &emsg); perr != nil {
+		emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr)
+	}
+	return &emsg
+}
+
+func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) {
+	return FindInPath(plugin, paths)
+}
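
A minimal sketch of driving RawExec directly; the plugin path, stdin payload, and CNI_COMMAND value are illustrative assumptions, and a real ADD/DEL call would set the full set of CNI_* environment variables.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	e := &invoke.RawExec{Stderr: os.Stderr}

	// Hypothetical invocation of a plugin's VERSION command.
	stdin := []byte(`{"cniVersion":"0.4.0"}`)
	env := append(os.Environ(), "CNI_COMMAND=VERSION")

	out, err := e.ExecPlugin(context.Background(), "/opt/cni/bin/bridge", stdin, env)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("plugin stdout: %s\n", out)
}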

+ 189 - 0
vendor/github.com/containernetworking/cni/pkg/types/020/types.go

@@ -0,0 +1,189 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types020
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "0.2.0"
+
+var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+	convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010)
+	convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010)
+
+	// Creator
+	convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+// Compatibility types for CNI versions 0.1.0 and 0.2.0
+
+// NewResult creates a new Result object from JSON data. The JSON data
+// must be compatible with the CNI versions implemented by this type.
+func NewResult(data []byte) (types.Result, error) {
+	result := &Result{}
+	if err := json.Unmarshal(data, result); err != nil {
+		return nil, err
+	}
+	for _, v := range supportedVersions {
+		if result.CNIVersion == v {
+			if result.CNIVersion == "" {
+				result.CNIVersion = "0.1.0"
+			}
+			return result, nil
+		}
+	}
+	return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+		supportedVersions, result.CNIVersion)
+}
+
+// GetResult converts the given Result object to the ImplementedSpecVersion
+// and returns the concrete type or an error
+func GetResult(r types.Result) (*Result, error) {
+	result020, err := convert.Convert(r, ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	result, ok := result020.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	return result, nil
+}
+
+func convertFrom010(from types.Result, toVersion string) (types.Result, error) {
+	if toVersion != "0.2.0" {
+		panic("only converts to version 0.2.0")
+	}
+	fromResult := from.(*Result)
+	return &Result{
+		CNIVersion: ImplementedSpecVersion,
+		IP4:        fromResult.IP4.Copy(),
+		IP6:        fromResult.IP6.Copy(),
+		DNS:        *fromResult.DNS.Copy(),
+	}, nil
+}
+
+func convertTo010(from types.Result, toVersion string) (types.Result, error) {
+	if toVersion != "0.1.0" {
+		panic("only converts to version 0.1.0")
+	}
+	fromResult := from.(*Result)
+	return &Result{
+		CNIVersion: "0.1.0",
+		IP4:        fromResult.IP4.Copy(),
+		IP6:        fromResult.IP6.Copy(),
+		DNS:        *fromResult.DNS.Copy(),
+	}, nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+	CNIVersion string    `json:"cniVersion,omitempty"`
+	IP4        *IPConfig `json:"ip4,omitempty"`
+	IP6        *IPConfig `json:"ip6,omitempty"`
+	DNS        types.DNS `json:"dns,omitempty"`
+}
+
+func (r *Result) Version() string {
+	return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+	// If the creator of the result did not set the CNIVersion, assume it
+	// should be the highest spec version implemented by this Result
+	if r.CNIVersion == "" {
+		r.CNIVersion = ImplementedSpecVersion
+	}
+	return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+	return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+	data, err := json.MarshalIndent(r, "", "    ")
+	if err != nil {
+		return err
+	}
+	_, err = writer.Write(data)
+	return err
+}
+
+// IPConfig contains values necessary to configure an interface
+type IPConfig struct {
+	IP      net.IPNet
+	Gateway net.IP
+	Routes  []types.Route
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+	if i == nil {
+		return nil
+	}
+
+	var routes []types.Route
+	for _, fromRoute := range i.Routes {
+		routes = append(routes, *fromRoute.Copy())
+	}
+	return &IPConfig{
+		IP:      i.IP,
+		Gateway: i.Gateway,
+		Routes:  routes,
+	}
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type ipConfig struct {
+	IP      types.IPNet   `json:"ip"`
+	Gateway net.IP        `json:"gateway,omitempty"`
+	Routes  []types.Route `json:"routes,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+	ipc := ipConfig{
+		IP:      types.IPNet(c.IP),
+		Gateway: c.Gateway,
+		Routes:  c.Routes,
+	}
+
+	return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+	ipc := ipConfig{}
+	if err := json.Unmarshal(data, &ipc); err != nil {
+		return err
+	}
+
+	c.IP = net.IPNet(ipc.IP)
+	c.Gateway = ipc.Gateway
+	c.Routes = ipc.Routes
+	return nil
+}
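
A small sketch of decoding a legacy result with this package; the JSON payload and addresses are illustrative.

package main

import (
	"fmt"
	"log"

	types020 "github.com/containernetworking/cni/pkg/types/020"
)

func main() {
	// Example output from a 0.2.0-era plugin.
	data := []byte(`{
		"cniVersion": "0.2.0",
		"ip4": {"ip": "10.1.2.3/24", "gateway": "10.1.2.1"},
		"dns": {"nameservers": ["10.1.2.1"]}
	}`)

	r, err := types020.NewResult(data)
	if err != nil {
		log.Fatal(err)
	}
	res, err := types020.GetResult(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("IPv4 address:", res.IP4.IP.String())
}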

+ 306 - 0
vendor/github.com/containernetworking/cni/pkg/types/040/types.go

@@ -0,0 +1,306 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types040
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	types020 "github.com/containernetworking/cni/pkg/types/020"
+	convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "0.4.0"
+
+var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+	// Up-converters
+	convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x)
+	convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x)
+	convert.RegisterConverter("0.3.0", supportedVersions, convertInternal)
+	convert.RegisterConverter("0.3.1", supportedVersions, convertInternal)
+
+	// Down-converters
+	convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal)
+	convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+	convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x)
+	convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+
+	// Creator
+	convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+func NewResult(data []byte) (types.Result, error) {
+	result := &Result{}
+	if err := json.Unmarshal(data, result); err != nil {
+		return nil, err
+	}
+	for _, v := range supportedVersions {
+		if result.CNIVersion == v {
+			return result, nil
+		}
+	}
+	return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+		supportedVersions, result.CNIVersion)
+}
+
+func GetResult(r types.Result) (*Result, error) {
+	resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	result, ok := resultCurrent.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	return result, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+	newResult, err := convert.Convert(result, ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	return newResult.(*Result), nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+	CNIVersion string         `json:"cniVersion,omitempty"`
+	Interfaces []*Interface   `json:"interfaces,omitempty"`
+	IPs        []*IPConfig    `json:"ips,omitempty"`
+	Routes     []*types.Route `json:"routes,omitempty"`
+	DNS        types.DNS      `json:"dns,omitempty"`
+}
+
+func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig {
+	return &IPConfig{
+		Version: ipVersion,
+		Address: from.IP,
+		Gateway: from.Gateway,
+	}
+}
+
+func convertFrom02x(from types.Result, toVersion string) (types.Result, error) {
+	fromResult := from.(*types020.Result)
+	toResult := &Result{
+		CNIVersion: toVersion,
+		DNS:        *fromResult.DNS.Copy(),
+		Routes:     []*types.Route{},
+	}
+	if fromResult.IP4 != nil {
+		toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4"))
+		for _, fromRoute := range fromResult.IP4.Routes {
+			toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+		}
+	}
+
+	if fromResult.IP6 != nil {
+		toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6"))
+		for _, fromRoute := range fromResult.IP6.Routes {
+			toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+		}
+	}
+
+	return toResult, nil
+}
+
+func convertInternal(from types.Result, toVersion string) (types.Result, error) {
+	fromResult := from.(*Result)
+	toResult := &Result{
+		CNIVersion: toVersion,
+		DNS:        *fromResult.DNS.Copy(),
+		Routes:     []*types.Route{},
+	}
+	for _, fromIntf := range fromResult.Interfaces {
+		toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy())
+	}
+	for _, fromIPC := range fromResult.IPs {
+		toResult.IPs = append(toResult.IPs, fromIPC.Copy())
+	}
+	for _, fromRoute := range fromResult.Routes {
+		toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+	}
+	return toResult, nil
+}
+
+func convertTo02x(from types.Result, toVersion string) (types.Result, error) {
+	fromResult := from.(*Result)
+	toResult := &types020.Result{
+		CNIVersion: toVersion,
+		DNS:        *fromResult.DNS.Copy(),
+	}
+
+	for _, fromIP := range fromResult.IPs {
+		// Only convert the first IP address of each version as 0.2.0
+		// and earlier cannot handle multiple IP addresses
+		if fromIP.Version == "4" && toResult.IP4 == nil {
+			toResult.IP4 = &types020.IPConfig{
+				IP:      fromIP.Address,
+				Gateway: fromIP.Gateway,
+			}
+		} else if fromIP.Version == "6" && toResult.IP6 == nil {
+			toResult.IP6 = &types020.IPConfig{
+				IP:      fromIP.Address,
+				Gateway: fromIP.Gateway,
+			}
+		}
+		if toResult.IP4 != nil && toResult.IP6 != nil {
+			break
+		}
+	}
+
+	for _, fromRoute := range fromResult.Routes {
+		is4 := fromRoute.Dst.IP.To4() != nil
+		if is4 && toResult.IP4 != nil {
+			toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{
+				Dst: fromRoute.Dst,
+				GW:  fromRoute.GW,
+			})
+		} else if !is4 && toResult.IP6 != nil {
+			toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{
+				Dst: fromRoute.Dst,
+				GW:  fromRoute.GW,
+			})
+		}
+	}
+
+	// 0.2.0 and earlier require at least one IP address in the Result
+	if toResult.IP4 == nil && toResult.IP6 == nil {
+		return nil, fmt.Errorf("cannot convert: no valid IP addresses")
+	}
+
+	return toResult, nil
+}
+
+func (r *Result) Version() string {
+	return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+	// If the creator of the result did not set the CNIVersion, assume it
+	// should be the highest spec version implemented by this Result
+	if r.CNIVersion == "" {
+		r.CNIVersion = ImplementedSpecVersion
+	}
+	return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+	return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+	data, err := json.MarshalIndent(r, "", "    ")
+	if err != nil {
+		return err
+	}
+	_, err = writer.Write(data)
+	return err
+}
+
+// Interface contains values about the created interfaces
+type Interface struct {
+	Name    string `json:"name"`
+	Mac     string `json:"mac,omitempty"`
+	Sandbox string `json:"sandbox,omitempty"`
+}
+
+func (i *Interface) String() string {
+	return fmt.Sprintf("%+v", *i)
+}
+
+func (i *Interface) Copy() *Interface {
+	if i == nil {
+		return nil
+	}
+	newIntf := *i
+	return &newIntf
+}
+
+// Int returns a pointer to the int value passed in.  Used to
+// set the IPConfig.Interface field.
+func Int(v int) *int {
+	return &v
+}
+
+// IPConfig contains values necessary to configure an IP address on an interface
+type IPConfig struct {
+	// IP version, either "4" or "6"
+	Version string
+	// Index into the Result struct's Interfaces list
+	Interface *int
+	Address   net.IPNet
+	Gateway   net.IP
+}
+
+func (i *IPConfig) String() string {
+	return fmt.Sprintf("%+v", *i)
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+	if i == nil {
+		return nil
+	}
+
+	ipc := &IPConfig{
+		Version: i.Version,
+		Address: i.Address,
+		Gateway: i.Gateway,
+	}
+	if i.Interface != nil {
+		intf := *i.Interface
+		ipc.Interface = &intf
+	}
+	return ipc
+}
+
+// JSON (un)marshallable types
+type ipConfig struct {
+	Version   string      `json:"version"`
+	Interface *int        `json:"interface,omitempty"`
+	Address   types.IPNet `json:"address"`
+	Gateway   net.IP      `json:"gateway,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+	ipc := ipConfig{
+		Version:   c.Version,
+		Interface: c.Interface,
+		Address:   types.IPNet(c.Address),
+		Gateway:   c.Gateway,
+	}
+
+	return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+	ipc := ipConfig{}
+	if err := json.Unmarshal(data, &ipc); err != nil {
+		return err
+	}
+
+	c.Version = ipc.Version
+	c.Interface = ipc.Interface
+	c.Address = net.IPNet(ipc.Address)
+	c.Gateway = ipc.Gateway
+	return nil
+}
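
A sketch of down-converting a 0.4.0 result to the legacy 0.2.0 type via the converters registered above; the address is illustrative.

package main

import (
	"fmt"
	"log"
	"net"

	types040 "github.com/containernetworking/cni/pkg/types/040"
)

func main() {
	r := &types040.Result{
		CNIVersion: "0.4.0",
		IPs: []*types040.IPConfig{{
			Version: "4",
			// Keep the host address together with its mask.
			Address: net.IPNet{IP: net.ParseIP("10.1.2.3"), Mask: net.CIDRMask(24, 32)},
		}},
	}

	// convertTo02x requires at least one IP and drops anything 0.2.0
	// cannot express (interfaces, additional IPs).
	legacy, err := r.GetAsVersion("0.2.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("converted to CNI spec version:", legacy.Version())
}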

+ 307 - 0
vendor/github.com/containernetworking/cni/pkg/types/100/types.go

@@ -0,0 +1,307 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types100
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	types040 "github.com/containernetworking/cni/pkg/types/040"
+	convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+const ImplementedSpecVersion string = "1.0.0"
+
+var supportedVersions = []string{ImplementedSpecVersion}
+
+// Register converters for all versions less than the implemented spec version
+func init() {
+	// Up-converters
+	convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x)
+	convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x)
+	convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x)
+	convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x)
+	convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x)
+
+	// Down-converters
+	convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x)
+	convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x)
+
+	// Creator
+	convert.RegisterCreator(supportedVersions, NewResult)
+}
+
+func NewResult(data []byte) (types.Result, error) {
+	result := &Result{}
+	if err := json.Unmarshal(data, result); err != nil {
+		return nil, err
+	}
+	for _, v := range supportedVersions {
+		if result.CNIVersion == v {
+			return result, nil
+		}
+	}
+	return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q",
+		supportedVersions, result.CNIVersion)
+}
+
+func GetResult(r types.Result) (*Result, error) {
+	resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	result, ok := resultCurrent.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	return result, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+	newResult, err := convert.Convert(result, ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	return newResult.(*Result), nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+	CNIVersion string         `json:"cniVersion,omitempty"`
+	Interfaces []*Interface   `json:"interfaces,omitempty"`
+	IPs        []*IPConfig    `json:"ips,omitempty"`
+	Routes     []*types.Route `json:"routes,omitempty"`
+	DNS        types.DNS      `json:"dns,omitempty"`
+}
+
+func convertFrom02x(from types.Result, toVersion string) (types.Result, error) {
+	result040, err := convert.Convert(from, "0.4.0")
+	if err != nil {
+		return nil, err
+	}
+	result100, err := convertFrom04x(result040, ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	return result100, nil
+}
+
+func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig {
+	to := &IPConfig{
+		Address: from.Address,
+		Gateway: from.Gateway,
+	}
+	if from.Interface != nil {
+		intf := *from.Interface
+		to.Interface = &intf
+	}
+	return to
+}
+
+func convertInterfaceFrom040(from *types040.Interface) *Interface {
+	return &Interface{
+		Name:    from.Name,
+		Mac:     from.Mac,
+		Sandbox: from.Sandbox,
+	}
+}
+
+func convertFrom04x(from types.Result, toVersion string) (types.Result, error) {
+	fromResult := from.(*types040.Result)
+	toResult := &Result{
+		CNIVersion: toVersion,
+		DNS:        *fromResult.DNS.Copy(),
+		Routes:     []*types.Route{},
+	}
+	for _, fromIntf := range fromResult.Interfaces {
+		toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf))
+	}
+	for _, fromIPC := range fromResult.IPs {
+		toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC))
+	}
+	for _, fromRoute := range fromResult.Routes {
+		toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+	}
+	return toResult, nil
+}
+
+func convertIPConfigTo040(from *IPConfig) *types040.IPConfig {
+	version := "6"
+	if from.Address.IP.To4() != nil {
+		version = "4"
+	}
+	to := &types040.IPConfig{
+		Version: version,
+		Address: from.Address,
+		Gateway: from.Gateway,
+	}
+	if from.Interface != nil {
+		intf := *from.Interface
+		to.Interface = &intf
+	}
+	return to
+}
+
+func convertInterfaceTo040(from *Interface) *types040.Interface {
+	return &types040.Interface{
+		Name:    from.Name,
+		Mac:     from.Mac,
+		Sandbox: from.Sandbox,
+	}
+}
+
+func convertTo04x(from types.Result, toVersion string) (types.Result, error) {
+	fromResult := from.(*Result)
+	toResult := &types040.Result{
+		CNIVersion: toVersion,
+		DNS:        *fromResult.DNS.Copy(),
+		Routes:     []*types.Route{},
+	}
+	for _, fromIntf := range fromResult.Interfaces {
+		toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf))
+	}
+	for _, fromIPC := range fromResult.IPs {
+		toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC))
+	}
+	for _, fromRoute := range fromResult.Routes {
+		toResult.Routes = append(toResult.Routes, fromRoute.Copy())
+	}
+	return toResult, nil
+}
+
+func convertTo02x(from types.Result, toVersion string) (types.Result, error) {
+	// First convert to 0.4.0
+	result040, err := convertTo04x(from, "0.4.0")
+	if err != nil {
+		return nil, err
+	}
+	result02x, err := convert.Convert(result040, toVersion)
+	if err != nil {
+		return nil, err
+	}
+	return result02x, nil
+}
+
+func (r *Result) Version() string {
+	return r.CNIVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+	// If the creator of the result did not set the CNIVersion, assume it
+	// should be the highest spec version implemented by this Result
+	if r.CNIVersion == "" {
+		r.CNIVersion = ImplementedSpecVersion
+	}
+	return convert.Convert(r, version)
+}
+
+func (r *Result) Print() error {
+	return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+	data, err := json.MarshalIndent(r, "", "    ")
+	if err != nil {
+		return err
+	}
+	_, err = writer.Write(data)
+	return err
+}
+
+// Interface contains values about the created interfaces
+type Interface struct {
+	Name    string `json:"name"`
+	Mac     string `json:"mac,omitempty"`
+	Sandbox string `json:"sandbox,omitempty"`
+}
+
+func (i *Interface) String() string {
+	return fmt.Sprintf("%+v", *i)
+}
+
+func (i *Interface) Copy() *Interface {
+	if i == nil {
+		return nil
+	}
+	newIntf := *i
+	return &newIntf
+}
+
+// Int returns a pointer to the int value passed in.  Used to
+// set the IPConfig.Interface field.
+func Int(v int) *int {
+	return &v
+}
+
+// IPConfig contains values necessary to configure an IP address on an interface
+type IPConfig struct {
+	// Index into the Result struct's Interfaces list
+	Interface *int
+	Address   net.IPNet
+	Gateway   net.IP
+}
+
+func (i *IPConfig) String() string {
+	return fmt.Sprintf("%+v", *i)
+}
+
+func (i *IPConfig) Copy() *IPConfig {
+	if i == nil {
+		return nil
+	}
+
+	ipc := &IPConfig{
+		Address: i.Address,
+		Gateway: i.Gateway,
+	}
+	if i.Interface != nil {
+		intf := *i.Interface
+		ipc.Interface = &intf
+	}
+	return ipc
+}
+
+// JSON (un)marshallable types
+type ipConfig struct {
+	Interface *int        `json:"interface,omitempty"`
+	Address   types.IPNet `json:"address"`
+	Gateway   net.IP      `json:"gateway,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+	ipc := ipConfig{
+		Interface: c.Interface,
+		Address:   types.IPNet(c.Address),
+		Gateway:   c.Gateway,
+	}
+
+	return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+	ipc := ipConfig{}
+	if err := json.Unmarshal(data, &ipc); err != nil {
+		return err
+	}
+
+	c.Interface = ipc.Interface
+	c.Address = net.IPNet(ipc.Address)
+	c.Gateway = ipc.Gateway
+	return nil
+}
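
A sketch of up-converting an older result into the 1.0.0 type; the 0.4.0 input is built by hand with an illustrative address.

package main

import (
	"fmt"
	"log"
	"net"

	types040 "github.com/containernetworking/cni/pkg/types/040"
	types100 "github.com/containernetworking/cni/pkg/types/100"
)

func main() {
	old := &types040.Result{
		CNIVersion: "0.4.0",
		IPs: []*types040.IPConfig{{
			Version: "4",
			Address: net.IPNet{IP: net.ParseIP("10.1.2.3"), Mask: net.CIDRMask(24, 32)},
		}},
	}

	cur, err := types100.NewResultFromResult(old)
	if err != nil {
		log.Fatal(err)
	}
	// In 1.0.0 the per-IP "version" field is gone; the address itself
	// carries the family.
	fmt.Println("1.0.0 result IP:", cur.IPs[0].Address.String())
}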

+ 122 - 0
vendor/github.com/containernetworking/cni/pkg/types/args.go

@@ -0,0 +1,122 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// UnmarshallableBool typedef for builtin bool
+// because builtin type's methods can't be declared
+type UnmarshallableBool bool
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns boolean true if the string is "1" or "[Tt]rue"
+// Returns boolean false if the string is "0" or "[Ff]alse"
+func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
+	s := strings.ToLower(string(data))
+	switch s {
+	case "1", "true":
+		*b = true
+	case "0", "false":
+		*b = false
+	default:
+		return fmt.Errorf("boolean unmarshal error: invalid input %s", s)
+	}
+	return nil
+}
+
+// UnmarshallableString typedef for builtin string
+type UnmarshallableString string
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns the string
+func (s *UnmarshallableString) UnmarshalText(data []byte) error {
+	*s = UnmarshallableString(data)
+	return nil
+}
+
+// CommonArgs contains the IgnoreUnknown argument
+// and must be embedded by all Arg structs
+type CommonArgs struct {
+	IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
+}
+
+// GetKeyField is a helper function that returns the named field from a
+// reflect.Value representing a pointer to a struct
+func GetKeyField(keyString string, v reflect.Value) reflect.Value {
+	return v.Elem().FieldByName(keyString)
+}
+
+// UnmarshalableArgsError is used to indicate an error unmarshalling args
+// from the args-string in the form "K=V;K2=V2;..."
+type UnmarshalableArgsError struct {
+	error
+}
+
+// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
+func LoadArgs(args string, container interface{}) error {
+	if args == "" {
+		return nil
+	}
+
+	containerValue := reflect.ValueOf(container)
+
+	pairs := strings.Split(args, ";")
+	unknownArgs := []string{}
+	for _, pair := range pairs {
+		kv := strings.Split(pair, "=")
+		if len(kv) != 2 {
+			return fmt.Errorf("ARGS: invalid pair %q", pair)
+		}
+		keyString := kv[0]
+		valueString := kv[1]
+		keyField := GetKeyField(keyString, containerValue)
+		if !keyField.IsValid() {
+			unknownArgs = append(unknownArgs, pair)
+			continue
+		}
+
+		var keyFieldInterface interface{}
+		switch {
+		case keyField.Kind() == reflect.Ptr:
+			keyField.Set(reflect.New(keyField.Type().Elem()))
+			keyFieldInterface = keyField.Interface()
+		case keyField.CanAddr() && keyField.Addr().CanInterface():
+			keyFieldInterface = keyField.Addr().Interface()
+		default:
+			return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)}
+		}
+		u, ok := keyFieldInterface.(encoding.TextUnmarshaler)
+		if !ok {
+			return UnmarshalableArgsError{fmt.Errorf(
+				"ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
+				keyString, reflect.TypeOf(keyFieldInterface))}
+		}
+		err := u.UnmarshalText([]byte(valueString))
+		if err != nil {
+			return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err)
+		}
+	}
+
+	isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
+	if len(unknownArgs) > 0 && !isIgnoreUnknown {
+		return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
+	}
+	return nil
+}
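
A sketch of LoadArgs with a hypothetical argument struct (not part of the library); per the package contract it embeds CommonArgs and uses the Unmarshallable* typedefs for its fields.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

// envArgs is a made-up example struct, not part of the library.
type envArgs struct {
	types.CommonArgs
	MAC types.UnmarshallableString
}

func main() {
	args := &envArgs{}
	// CNI_ARGS-style "K=V;K2=V2" input; unknown keys are tolerated because
	// IgnoreUnknown is set to 1.
	if err := types.LoadArgs("IgnoreUnknown=1;MAC=02:42:ac:11:00:02", args); err != nil {
		log.Fatal(err)
	}
	fmt.Println("parsed MAC:", args.MAC)
}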

+ 56 - 0
vendor/github.com/containernetworking/cni/pkg/types/create/create.go

@@ -0,0 +1,56 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package create
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+	convert "github.com/containernetworking/cni/pkg/types/internal"
+)
+
+// DecodeVersion returns the CNI version from CNI configuration or result JSON,
+// or an error if the operation could not be performed.
+func DecodeVersion(jsonBytes []byte) (string, error) {
+	var conf struct {
+		CNIVersion string `json:"cniVersion"`
+	}
+	err := json.Unmarshal(jsonBytes, &conf)
+	if err != nil {
+		return "", fmt.Errorf("decoding version from network config: %w", err)
+	}
+	if conf.CNIVersion == "" {
+		return "0.1.0", nil
+	}
+	return conf.CNIVersion, nil
+}
+
+// Create creates a CNI Result using the given JSON with the expected
+// version, or an error if the creation could not be performed
+func Create(version string, bytes []byte) (types.Result, error) {
+	return convert.Create(version, bytes)
+}
+
+// CreateFromBytes creates a CNI Result from the given JSON, automatically
+// detecting the CNI spec version of the result. An error is returned if the
+// operation could not be performed.
+func CreateFromBytes(bytes []byte) (types.Result, error) {
+	version, err := DecodeVersion(bytes)
+	if err != nil {
+		return nil, err
+	}
+	return convert.Create(version, bytes)
+}
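
A sketch of CreateFromBytes on illustrative plugin output; the concrete result types register themselves in their init functions, so at least one of the typed packages must be imported for its side effect.

package main

import (
	"fmt"
	"log"

	// Imported for the side effect of registering the 0.3.x/0.4.0 result type.
	_ "github.com/containernetworking/cni/pkg/types/040"
	"github.com/containernetworking/cni/pkg/types/create"
)

func main() {
	out := []byte(`{"cniVersion":"0.4.0","ips":[{"version":"4","address":"10.1.2.3/24"}]}`)

	result, err := create.CreateFromBytes(out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded result for CNI version:", result.Version())
}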

+ 92 - 0
vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go

@@ -0,0 +1,92 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package convert
+
+import (
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+// ConvertFn should convert from the given arbitrary Result type into a
+// Result implementing CNI specification version passed in toVersion.
+// The function is guaranteed to be passed a Result type matching the
+// fromVersion it was registered with, and is guaranteed to be
+// passed a toVersion matching one of the toVersions it was registered with.
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error)
+
+type converter struct {
+	// fromVersion is the CNI Result spec version that convertFn accepts
+	fromVersion string
+	// toVersions is a list of versions that convertFn can convert to
+	toVersions []string
+	convertFn  ConvertFn
+}
+
+var converters []*converter
+
+func findConverter(fromVersion, toVersion string) *converter {
+	for _, c := range converters {
+		if c.fromVersion == fromVersion {
+			for _, v := range c.toVersions {
+				if v == toVersion {
+					return c
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Convert converts a CNI Result to the requested CNI specification version,
+// or returns an error if the conversion could not be performed or failed
+func Convert(from types.Result, toVersion string) (types.Result, error) {
+	if toVersion == "" {
+		toVersion = "0.1.0"
+	}
+
+	fromVersion := from.Version()
+
+	// Shortcut for same version
+	if fromVersion == toVersion {
+		return from, nil
+	}
+
+	// Otherwise find the right converter
+	c := findConverter(fromVersion, toVersion)
+	if c == nil {
+		return nil, fmt.Errorf("no converter for CNI result version %s to %s",
+			fromVersion, toVersion)
+	}
+	return c.convertFn(from, toVersion)
+}
+
+// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED
+// EXCEPT FROM CNI ITSELF.
+func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) {
+	// Make sure there is no converter already registered for these
+	// from and to versions
+	for _, v := range toVersions {
+		if findConverter(fromVersion, v) != nil {
+			panic(fmt.Sprintf("converter already registered for %s to %s",
+				fromVersion, v))
+		}
+	}
+	converters = append(converters, &converter{
+		fromVersion: fromVersion,
+		toVersions:  toVersions,
+		convertFn:   convertFn,
+	})
+}

+ 66 - 0
vendor/github.com/containernetworking/cni/pkg/types/internal/create.go

@@ -0,0 +1,66 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package convert
+
+import (
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+type ResultFactoryFunc func([]byte) (types.Result, error)
+
+type creator struct {
+	// CNI Result spec versions that createFn can create a Result for
+	versions []string
+	createFn ResultFactoryFunc
+}
+
+var creators []*creator
+
+func findCreator(version string) *creator {
+	for _, c := range creators {
+		for _, v := range c.versions {
+			if v == version {
+				return c
+			}
+		}
+	}
+	return nil
+}
+
+// Create creates a CNI Result using the given JSON, or an error if the creation
+// could not be performed
+func Create(version string, bytes []byte) (types.Result, error) {
+	if c := findCreator(version); c != nil {
+		return c.createFn(bytes)
+	}
+	return nil, fmt.Errorf("unsupported CNI result version %q", version)
+}
+
+// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED
+// EXCEPT FROM CNI ITSELF.
+func RegisterCreator(versions []string, createFn ResultFactoryFunc) {
+	// Make sure there is no creator already registered for these versions
+	for _, v := range versions {
+		if findCreator(v) != nil {
+			panic(fmt.Sprintf("creator already registered for %s", v))
+		}
+	}
+	creators = append(creators, &creator{
+		versions: versions,
+		createFn: createFn,
+	})
+}

+ 234 - 0
vendor/github.com/containernetworking/cni/pkg/types/types.go

@@ -0,0 +1,234 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+)
+
+// IPNet is like net.IPNet but adds JSON marshalling and unmarshalling
+type IPNet net.IPNet
+
+// ParseCIDR takes a string like "10.2.3.1/24" and returns a *net.IPNet that
+// keeps the host address "10.2.3.1" together with the /24 mask
+func ParseCIDR(s string) (*net.IPNet, error) {
+	ip, ipn, err := net.ParseCIDR(s)
+	if err != nil {
+		return nil, err
+	}
+
+	ipn.IP = ip
+	return ipn, nil
+}
+
+func (n IPNet) MarshalJSON() ([]byte, error) {
+	return json.Marshal((*net.IPNet)(&n).String())
+}
+
+func (n *IPNet) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+
+	tmp, err := ParseCIDR(s)
+	if err != nil {
+		return err
+	}
+
+	*n = IPNet(*tmp)
+	return nil
+}
+
+// NetConf describes a network.
+type NetConf struct {
+	CNIVersion string `json:"cniVersion,omitempty"`
+
+	Name         string          `json:"name,omitempty"`
+	Type         string          `json:"type,omitempty"`
+	Capabilities map[string]bool `json:"capabilities,omitempty"`
+	IPAM         IPAM            `json:"ipam,omitempty"`
+	DNS          DNS             `json:"dns"`
+
+	RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
+	PrevResult    Result                 `json:"-"`
+}
+
+type IPAM struct {
+	Type string `json:"type,omitempty"`
+}
+
+// NetConfList describes an ordered list of networks.
+type NetConfList struct {
+	CNIVersion string `json:"cniVersion,omitempty"`
+
+	Name         string     `json:"name,omitempty"`
+	DisableCheck bool       `json:"disableCheck,omitempty"`
+	Plugins      []*NetConf `json:"plugins,omitempty"`
+}
+
+// Result is an interface that provides the result of plugin execution
+type Result interface {
+	// The highest CNI specification result version the result supports
+	// without having to convert
+	Version() string
+
+	// Returns the result converted into the requested CNI specification
+	// result version, or an error if conversion failed
+	GetAsVersion(version string) (Result, error)
+
+	// Prints the result in JSON format to stdout
+	Print() error
+
+	// Prints the result in JSON format to provided writer
+	PrintTo(writer io.Writer) error
+}
+
+func PrintResult(result Result, version string) error {
+	newResult, err := result.GetAsVersion(version)
+	if err != nil {
+		return err
+	}
+	return newResult.Print()
+}
+
+// DNS contains values interesting for DNS resolvers
+type DNS struct {
+	Nameservers []string `json:"nameservers,omitempty"`
+	Domain      string   `json:"domain,omitempty"`
+	Search      []string `json:"search,omitempty"`
+	Options     []string `json:"options,omitempty"`
+}
+
+func (d *DNS) Copy() *DNS {
+	if d == nil {
+		return nil
+	}
+
+	to := &DNS{Domain: d.Domain}
+	for _, ns := range d.Nameservers {
+		to.Nameservers = append(to.Nameservers, ns)
+	}
+	for _, s := range d.Search {
+		to.Search = append(to.Search, s)
+	}
+	for _, o := range d.Options {
+		to.Options = append(to.Options, o)
+	}
+	return to
+}
+
+type Route struct {
+	Dst net.IPNet
+	GW  net.IP
+}
+
+func (r *Route) String() string {
+	return fmt.Sprintf("%+v", *r)
+}
+
+func (r *Route) Copy() *Route {
+	if r == nil {
+		return nil
+	}
+
+	return &Route{
+		Dst: r.Dst,
+		GW:  r.GW,
+	}
+}
+
+// Well known error codes
+// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
+const (
+	ErrUnknown                     uint = iota // 0
+	ErrIncompatibleCNIVersion                  // 1
+	ErrUnsupportedField                        // 2
+	ErrUnknownContainer                        // 3
+	ErrInvalidEnvironmentVariables             // 4
+	ErrIOFailure                               // 5
+	ErrDecodingFailure                         // 6
+	ErrInvalidNetworkConfig                    // 7
+	ErrTryAgainLater               uint = 11
+	ErrInternal                    uint = 999
+)
+
+type Error struct {
+	Code    uint   `json:"code"`
+	Msg     string `json:"msg"`
+	Details string `json:"details,omitempty"`
+}
+
+func NewError(code uint, msg, details string) *Error {
+	return &Error{
+		Code:    code,
+		Msg:     msg,
+		Details: details,
+	}
+}
+
+func (e *Error) Error() string {
+	details := ""
+	if e.Details != "" {
+		details = fmt.Sprintf("; %v", e.Details)
+	}
+	return fmt.Sprintf("%v%v", e.Msg, details)
+}
+
+func (e *Error) Print() error {
+	return prettyPrint(e)
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type route struct {
+	Dst IPNet  `json:"dst"`
+	GW  net.IP `json:"gw,omitempty"`
+}
+
+func (r *Route) UnmarshalJSON(data []byte) error {
+	rt := route{}
+	if err := json.Unmarshal(data, &rt); err != nil {
+		return err
+	}
+
+	r.Dst = net.IPNet(rt.Dst)
+	r.GW = rt.GW
+	return nil
+}
+
+func (r Route) MarshalJSON() ([]byte, error) {
+	rt := route{
+		Dst: IPNet(r.Dst),
+		GW:  r.GW,
+	}
+
+	return json.Marshal(rt)
+}
+
+func prettyPrint(obj interface{}) error {
+	data, err := json.MarshalIndent(obj, "", "    ")
+	if err != nil {
+		return err
+	}
+	_, err = os.Stdout.Write(data)
+	return err
+}
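
A sketch of the JSON helpers in this package: ParseCIDR keeps the host address, and Route marshals through the custom IPNet type. The addresses are illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	// Unlike plain net.ParseCIDR, this keeps 10.2.3.1 instead of masking it
	// down to the network address 10.2.3.0.
	ipn, err := types.ParseCIDR("10.2.3.1/24")
	if err != nil {
		log.Fatal(err)
	}

	r := types.Route{Dst: *ipn, GW: net.ParseIP("10.2.3.254")}
	data, err := json.Marshal(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // {"dst":"10.2.3.1/24","gw":"10.2.3.254"}
}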

+ 84 - 0
vendor/github.com/containernetworking/cni/pkg/utils/utils.go

@@ -0,0 +1,84 @@
+// Copyright 2019 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"unicode"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+const (
+	// cniValidNameChars is the regexp used to validate valid characters in
+	// containerID and networkName
+	cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]`
+
+	// maxInterfaceNameLength is the maximum length of a valid interface name
+	maxInterfaceNameLength = 15
+)
+
+var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
+
+// ValidateContainerID will validate that the supplied containerID is not empty and does not contain invalid characters
+func ValidateContainerID(containerID string) *types.Error {
+
+	if containerID == "" {
+		return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
+	}
+	if !cniReg.MatchString(containerID) {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID)
+	}
+	return nil
+}
+
+// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters
+func ValidateNetworkName(networkName string) *types.Error {
+
+	if networkName == "" {
+		return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
+	}
+	if !cniReg.MatchString(networkName) {
+		return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName)
+	}
+	return nil
+}
+
+// ValidateInterfaceName will validate the interface name based on the four rules below
+// 1. The name must not be empty
+// 2. The name must be less than 16 characters
+// 3. The name must not be "." or ".."
+// 4. The name must not contain / or : or any whitespace characters
+// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024
+func ValidateInterfaceName(ifName string) *types.Error {
+	if len(ifName) == 0 {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "")
+	}
+	if len(ifName) > maxInterfaceNameLength {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1))
+	}
+	if ifName == "." || ifName == ".." {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "")
+	}
+	for _, r := range bytes.Runes([]byte(ifName)) {
+		if r == '/' || r == ':' || unicode.IsSpace(r) {
+			return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "")
+		}
+	}
+
+	return nil
+}
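
A sketch of the interface-name validation; the names are illustrative, and a non-nil return is a *types.Error carrying one of the well-known CNI error codes.

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/utils"
)

func main() {
	for _, name := range []string{"eth0", "", "an-interface-name-over-15-chars"} {
		if err := utils.ValidateInterfaceName(name); err != nil {
			fmt.Printf("%q rejected: %v\n", name, err)
			continue
		}
		fmt.Printf("%q accepted\n", name)
	}
}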

+ 26 - 0
vendor/github.com/containernetworking/cni/pkg/version/conf.go

@@ -0,0 +1,26 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+	"github.com/containernetworking/cni/pkg/types/create"
+)
+
+// ConfigDecoder can decode the CNI version available in network config data
+type ConfigDecoder struct{}
+
+func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) {
+	return create.DecodeVersion(jsonBytes)
+}
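
A small sketch of ConfigDecoder: a config without a cniVersion field decodes to 0.1.0 per the spec's compatibility rule. The config body is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	dec := &version.ConfigDecoder{}
	v, err := dec.Decode([]byte(`{"name":"mynet","type":"bridge"}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config version:", v) // 0.1.0
}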

+ 144 - 0
vendor/github.com/containernetworking/cni/pkg/version/plugin.go

@@ -0,0 +1,144 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// PluginInfo reports information about CNI versioning
+type PluginInfo interface {
+	// SupportedVersions returns one or more CNI spec versions that the plugin
+	// supports.  If input is provided in one of these versions, then the plugin
+	// promises to use the same CNI version in its response
+	SupportedVersions() []string
+
+	// Encode writes this CNI version information as JSON to the given Writer
+	Encode(io.Writer) error
+}
+
+type pluginInfo struct {
+	CNIVersion_        string   `json:"cniVersion"`
+	SupportedVersions_ []string `json:"supportedVersions,omitempty"`
+}
+
+// pluginInfo implements the PluginInfo interface
+var _ PluginInfo = &pluginInfo{}
+
+func (p *pluginInfo) Encode(w io.Writer) error {
+	return json.NewEncoder(w).Encode(p)
+}
+
+func (p *pluginInfo) SupportedVersions() []string {
+	return p.SupportedVersions_
+}
+
+// PluginSupports returns a new PluginInfo that will report the given versions
+// as supported
+func PluginSupports(supportedVersions ...string) PluginInfo {
+	if len(supportedVersions) < 1 {
+		panic("programmer error: you must support at least one version")
+	}
+	return &pluginInfo{
+		CNIVersion_:        Current(),
+		SupportedVersions_: supportedVersions,
+	}
+}
+
+// PluginDecoder can decode the response returned by a plugin's VERSION command
+type PluginDecoder struct{}
+
+func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
+	var info pluginInfo
+	err := json.Unmarshal(jsonBytes, &info)
+	if err != nil {
+		return nil, fmt.Errorf("decoding version info: %w", err)
+	}
+	if info.CNIVersion_ == "" {
+		return nil, fmt.Errorf("decoding version info: missing field cniVersion")
+	}
+	if len(info.SupportedVersions_) == 0 {
+		if info.CNIVersion_ == "0.2.0" {
+			return PluginSupports("0.1.0", "0.2.0"), nil
+		}
+		return nil, fmt.Errorf("decoding version info: missing field supportedVersions")
+	}
+	return &info, nil
+}
+
+// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major,
+// minor, and micro numbers or returns an error
+func ParseVersion(version string) (int, int, int, error) {
+	var major, minor, micro int
+	if version == "" { // special case: no version declared == v0.1.0
+		return 0, 1, 0, nil
+	}
+
+	parts := strings.Split(version, ".")
+	if len(parts) >= 4 {
+		return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version)
+	}
+
+	major, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err)
+	}
+
+	if len(parts) >= 2 {
+		minor, err = strconv.Atoi(parts[1])
+		if err != nil {
+			return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err)
+		}
+	}
+
+	if len(parts) >= 3 {
+		micro, err = strconv.Atoi(parts[2])
+		if err != nil {
+			return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err)
+		}
+	}
+
+	return major, minor, micro, nil
+}
+
+// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro
+// numbers, and compares them to determine whether the first version is greater
+// than or equal to the second
+func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
+	firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
+	if err != nil {
+		return false, err
+	}
+
+	secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion)
+	if err != nil {
+		return false, err
+	}
+
+	if firstMajor > secondMajor {
+		return true, nil
+	} else if firstMajor == secondMajor {
+		if firstMinor > secondMinor {
+			return true, nil
+		} else if firstMinor == secondMinor && firstMicro >= secondMicro {
+			return true, nil
+		}
+	}
+	return false, nil
+}
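
A sketch of the version-parsing helpers; the inputs are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// An empty version string is treated as the legacy 0.1.0.
	major, minor, micro, err := version.ParseVersion("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("empty version parses as %d.%d.%d\n", major, minor, micro)

	ok, err := version.GreaterThanOrEqualTo("1.0.0", "0.4.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("1.0.0 >= 0.4.0:", ok) // true
}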

+ 49 - 0
vendor/github.com/containernetworking/cni/pkg/version/reconcile.go

@@ -0,0 +1,49 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import "fmt"
+
+type ErrorIncompatible struct {
+	Config    string
+	Supported []string
+}
+
+func (e *ErrorIncompatible) Details() string {
+	return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported)
+}
+
+func (e *ErrorIncompatible) Error() string {
+	return fmt.Sprintf("incompatible CNI versions: %s", e.Details())
+}
+
+type Reconciler struct{}
+
+func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible {
+	return r.CheckRaw(configVersion, pluginInfo.SupportedVersions())
+}
+
+func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible {
+	for _, supportedVersion := range supportedVersions {
+		if configVersion == supportedVersion {
+			return nil
+		}
+	}
+
+	return &ErrorIncompatible{
+		Config:    configVersion,
+		Supported: supportedVersions,
+	}
+}
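
A sketch of Reconciler.Check with a deliberately mismatched config version and plugin; both values are illustrative.

package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	r := &version.Reconciler{}
	// A 1.0.0 config against a plugin that only reports the legacy versions.
	if err := r.Check("1.0.0", version.PluginSupports("0.1.0", "0.2.0")); err != nil {
		fmt.Println(err.Error())
	}
}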

+ 89 - 0
vendor/github.com/containernetworking/cni/pkg/version/version.go

@@ -0,0 +1,89 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+	types100 "github.com/containernetworking/cni/pkg/types/100"
+	"github.com/containernetworking/cni/pkg/types/create"
+)
+
+// Current reports the version of the CNI spec implemented by this library
+func Current() string {
+	return types100.ImplementedSpecVersion
+}
+
+// Legacy PluginInfo describes a plugin that is backwards compatible with the
+// CNI spec version 0.1.0.  In particular, a runtime compiled against the 0.1.0
+// library ought to work correctly with a plugin that reports support for
+// Legacy versions.
+//
+// Any future CNI spec versions which meet this definition should be added to
+// this list.
+var Legacy = PluginSupports("0.1.0", "0.2.0")
+var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0")
+
+// VersionsStartingFrom returns a list of versions starting from min, inclusive
+func VersionsStartingFrom(min string) PluginInfo {
+	out := []string{}
+	// cheat, just assume ordered
+	ok := false
+	for _, v := range All.SupportedVersions() {
+		if !ok && v == min {
+			ok = true
+		}
+		if ok {
+			out = append(out, v)
+		}
+	}
+	return PluginSupports(out...)
+}
+
+// NewResult finds a Result object matching the requested version (if any) and asks
+// that object to parse the plugin result, returning an error if parsing failed.
+func NewResult(version string, resultBytes []byte) (types.Result, error) {
+	return create.Create(version, resultBytes)
+}
+
+// ParsePrevResult parses a prevResult in a NetConf structure and sets
+// the NetConf's PrevResult member to the parsed Result object.
+func ParsePrevResult(conf *types.NetConf) error {
+	if conf.RawPrevResult == nil {
+		return nil
+	}
+
+	// Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the
+	// result version must match the config version, if the Result's version
+	// is empty, inject the config version.
+	if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" {
+		conf.RawPrevResult["CNIVersion"] = conf.CNIVersion
+	}
+
+	resultBytes, err := json.Marshal(conf.RawPrevResult)
+	if err != nil {
+		return fmt.Errorf("could not serialize prevResult: %w", err)
+	}
+
+	conf.RawPrevResult = nil
+	conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes)
+	if err != nil {
+		return fmt.Errorf("could not parse prevResult: %w", err)
+	}
+
+	return nil
+}

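A brief sketch of the helpers in version.go above, assuming the vendored import path: Current reports the implemented spec version, and VersionsStartingFrom trims the ordered All list.

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/version"
)

func main() {
	// The spec version implemented by the vendored library.
	fmt.Println(version.Current())

	// All versions from 0.3.0 onwards, relying on All being ordered.
	info := version.VersionsStartingFrom("0.3.0")
	fmt.Println(info.SupportedVersions()) // [0.3.0 0.3.1 0.4.0 1.0.0]
}
```
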
+ 37 - 0
vendor/github.com/dimchansky/utfbom/.gitignore

@@ -0,0 +1,37 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+*.o
+*.a
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.prof
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Gogland
+.idea/

+ 29 - 0
vendor/github.com/dimchansky/utfbom/.travis.yml

@@ -0,0 +1,29 @@
+language: go
+sudo: false
+
+go:
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x  
+  - 1.13.x
+  - 1.14.x
+  - 1.15.x
+
+cache:
+  directories:
+    - $HOME/.cache/go-build
+    - $HOME/gopath/pkg/mod
+
+env:
+  global:
+    - GO111MODULE=on
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+  - go get golang.org/x/tools/cmd/goimports
+  - go get golang.org/x/lint/golint
+script:
+  - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. The following files have problems:\n $unformatted" && false)
+  - golint ./... # This won't break the build, just show warnings
+  - $HOME/gopath/bin/goveralls -service=travis-ci

+ 201 - 0
vendor/github.com/dimchansky/utfbom/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com)
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 66 - 0
vendor/github.com/dimchansky/utfbom/README.md

@@ -0,0 +1,66 @@
+# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master)
+
+The utfbom package detects the BOM (Unicode Byte Order Mark) and removes it as necessary. It can also return the encoding detected by the BOM.
+
+## Installation
+
+    go get -u github.com/dimchansky/utfbom
+    
+## Example
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/dimchansky/utfbom"
+)
+
+func main() {
+	trySkip([]byte("\xEF\xBB\xBFhello"))
+	trySkip([]byte("hello"))
+}
+
+func trySkip(byteData []byte) {
+	fmt.Println("Input:", byteData)
+
+	// just skip BOM
+	output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData)))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println("ReadAll with BOM skipping", output)
+
+	// skip BOM and detect encoding
+	sr, enc := utfbom.Skip(bytes.NewReader(byteData))
+	fmt.Printf("Detected encoding: %s\n", enc)
+	output, err = ioutil.ReadAll(sr)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println("ReadAll with BOM detection and skipping", output)
+	fmt.Println()
+}
+```
+
+Output:
+
+```
+$ go run main.go
+Input: [239 187 191 104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: UTF8
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+
+Input: [104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: Unknown
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+```
+
+

+ 192 - 0
vendor/github.com/dimchansky/utfbom/utfbom.go

@@ -0,0 +1,192 @@
+// Package utfbom implements detection of the BOM (Unicode Byte Order Mark) and removes it as necessary.
+// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader
+// interface but provides automatic BOM checking and removing as necessary.
+package utfbom
+
+import (
+	"errors"
+	"io"
+)
+
+// Encoding represents the detected UTF encoding.
+type Encoding int
+
+// Constants to identify detected UTF encodings.
+const (
+	// Unknown encoding, returned when no BOM was detected
+	Unknown Encoding = iota
+
+	// UTF8, BOM bytes: EF BB BF
+	UTF8
+
+	// UTF-16, big-endian, BOM bytes: FE FF
+	UTF16BigEndian
+
+	// UTF-16, little-endian, BOM bytes: FF FE
+	UTF16LittleEndian
+
+	// UTF-32, big-endian, BOM bytes: 00 00 FE FF
+	UTF32BigEndian
+
+	// UTF-32, little-endian, BOM bytes: FF FE 00 00
+	UTF32LittleEndian
+)
+
+// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface.
+func (e Encoding) String() string {
+	switch e {
+	case UTF8:
+		return "UTF8"
+	case UTF16BigEndian:
+		return "UTF16BigEndian"
+	case UTF16LittleEndian:
+		return "UTF16LittleEndian"
+	case UTF32BigEndian:
+		return "UTF32BigEndian"
+	case UTF32LittleEndian:
+		return "UTF32LittleEndian"
+	default:
+		return "Unknown"
+	}
+}
+
+const maxConsecutiveEmptyReads = 100
+
+// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary.
+// It also returns the encoding detected by the BOM.
+// If the detected encoding is not needed, you can call the SkipOnly function.
+func Skip(rd io.Reader) (*Reader, Encoding) {
+	// Is it already a Reader?
+	b, ok := rd.(*Reader)
+	if ok {
+		return b, Unknown
+	}
+
+	enc, left, err := detectUtf(rd)
+	return &Reader{
+		rd:  rd,
+		buf: left,
+		err: err,
+	}, enc
+}
+
+// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary.
+func SkipOnly(rd io.Reader) *Reader {
+	r, _ := Skip(rd)
+	return r
+}
+
+// Reader implements automatic BOM (Unicode Byte Order Mark) checking and
+// removing as necessary for an io.Reader object.
+type Reader struct {
+	rd  io.Reader // reader provided by the client
+	buf []byte    // buffered data
+	err error     // last error
+}
+
+// Read is an implementation of io.Reader interface.
+// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	if r.buf == nil {
+		if r.err != nil {
+			return 0, r.readErr()
+		}
+
+		return r.rd.Read(p)
+	}
+
+	// copy as much as we can
+	n = copy(p, r.buf)
+	r.buf = nilIfEmpty(r.buf[n:])
+	return n, nil
+}
+
+func (r *Reader) readErr() error {
+	err := r.err
+	r.err = nil
+	return err
+}
+
+var errNegativeRead = errors.New("utfbom: reader returned negative count from Read")
+
+func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) {
+	buf, err = readBOM(rd)
+
+	if len(buf) >= 4 {
+		if isUTF32BigEndianBOM4(buf) {
+			return UTF32BigEndian, nilIfEmpty(buf[4:]), err
+		}
+		if isUTF32LittleEndianBOM4(buf) {
+			return UTF32LittleEndian, nilIfEmpty(buf[4:]), err
+		}
+	}
+
+	if len(buf) > 2 && isUTF8BOM3(buf) {
+		return UTF8, nilIfEmpty(buf[3:]), err
+	}
+
+	if (err != nil && err != io.EOF) || (len(buf) < 2) {
+		return Unknown, nilIfEmpty(buf), err
+	}
+
+	if isUTF16BigEndianBOM2(buf) {
+		return UTF16BigEndian, nilIfEmpty(buf[2:]), err
+	}
+	if isUTF16LittleEndianBOM2(buf) {
+		return UTF16LittleEndian, nilIfEmpty(buf[2:]), err
+	}
+
+	return Unknown, nilIfEmpty(buf), err
+}
+
+func readBOM(rd io.Reader) (buf []byte, err error) {
+	const maxBOMSize = 4
+	var bom [maxBOMSize]byte // used to read BOM
+
+	// read as many bytes as possible
+	for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] {
+		if n, err = rd.Read(bom[len(buf):]); n < 0 {
+			panic(errNegativeRead)
+		}
+		if n > 0 {
+			nEmpty = 0
+		} else {
+			nEmpty++
+			if nEmpty >= maxConsecutiveEmptyReads {
+				err = io.ErrNoProgress
+			}
+		}
+	}
+	return
+}
+
+func isUTF32BigEndianBOM4(buf []byte) bool {
+	return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF
+}
+
+func isUTF32LittleEndianBOM4(buf []byte) bool {
+	return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00
+}
+
+func isUTF8BOM3(buf []byte) bool {
+	return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF
+}
+
+func isUTF16BigEndianBOM2(buf []byte) bool {
+	return buf[0] == 0xFE && buf[1] == 0xFF
+}
+
+func isUTF16LittleEndianBOM2(buf []byte) bool {
+	return buf[0] == 0xFF && buf[1] == 0xFE
+}
+
+func nilIfEmpty(buf []byte) (res []byte) {
+	if len(buf) > 0 {
+		res = buf
+	}
+	return
+}

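To complement the UTF-8 example in the README above, a small sketch of BOM detection for a UTF-16 little-endian input (the byte slice is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/dimchansky/utfbom"
)

func main() {
	// FF FE is the UTF-16LE BOM; the remaining bytes are "hi" encoded as UTF-16LE.
	input := []byte{0xFF, 0xFE, 'h', 0x00, 'i', 0x00}

	sr, enc := utfbom.Skip(bytes.NewReader(input))
	fmt.Println("detected:", enc) // detected: UTF16LittleEndian

	payload, err := io.ReadAll(sr)
	if err != nil {
		fmt.Println(err)
		return
	}
	// The BOM is stripped; the payload itself is not transcoded.
	fmt.Println("payload:", payload)
}
```
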
+ 4 - 0
vendor/github.com/golang-jwt/jwt/v4/.gitignore

@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+

+ 9 - 0
vendor/github.com/golang-jwt/jwt/v4/LICENSE

@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+

+ 22 - 0
vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md

@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be:
+
+    "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as 
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement. If you're having 
+trouble migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.

+ 138 - 0
vendor/github.com/golang-jwt/jwt/v4/README.md

@@ -0,0 +1,138 @@
+# jwt-go
+
+[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage.  See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
+which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication).  It's commonly used for `Bearer` tokens in OAuth 2.  A token is made of three parts, separated by `.`'s.  The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded.  The last part is the signature, encoded the same way.
+
+The first part is called the header.  It contains the necessary information for verifying the last part, the signature.  For example, which encryption method was used for signing and what key was used.
+
+The part in the middle is the interesting bit.  It's called the Claims and contains the actual stuff you care about.  Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs.  Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods or key functions.  Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs), or implementing additional standards.
+
+| Extension | Purpose                                                                                                  | Repo                                       |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP       | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS)             | https://github.com/someone1/gcp-jwt-go     |
+| AWS       | Integrates with AWS Key Management Service, KMS                                                          | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS      | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc       |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered a primary offering by any of the mentioned cloud providers.
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready.  Feedback and feature requests are appreciated.  The API should be considered stable.  There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org).  Accepted pull requests will land on `main`.  Periodically, versions will be tagged from `main`.  You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:**
+A full list of breaking changes is available in `VERSION_HISTORY.md`.  See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one.  The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expects `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).

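As a rough sketch of the HMAC workflow the README describes, the same `[]byte` secret signs the token and is returned from the `Keyfunc` during parsing; the secret and claim values below are purely illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	secret := []byte("example-secret") // illustrative only; never hard-code real secrets

	claims := jwt.RegisteredClaims{
		Issuer:    "example-issuer",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
	if err != nil {
		panic(err)
	}

	parsed, err := jwt.ParseWithClaims(signed, &jwt.RegisteredClaims{}, func(t *jwt.Token) (interface{}, error) {
		// Check the alg is what we expect, as the security notice above advises.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token valid:", parsed.Valid)
}
```
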
+ 19 - 0
vendor/github.com/golang-jwt/jwt/v4/SECURITY.md

@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe the steps to reproduce the security issue, with code example(s) where possible.
+
+You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!

+ 135 - 0
vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md

@@ -0,0 +1,135 @@
+## `jwt-go` Version History
+
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and the claim contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+	* Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160.
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+	* Dropped support for `[]byte` keys when using RSA signing methods.  This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+	* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+	* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`.  The default value is type `MapClaims`, which is an alias to `map[string]interface{}`.  This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+	* Added `Claims` interface type to allow users to decode the claims into a custom type
+	* Added `ParseWithClaims`, which takes a third argument of type `Claims`.  Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+	* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+	* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+	* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests.  Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+	* Added several new, more specific, validation errors to error type bitmask
+	* Moved examples from README to executable example files
+	* Signing method registry is now thread safe
+	* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none.  You shouldn't use this.  The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+	* You can now specify a list of valid signing methods.  Anything outside this set will be rejected.
+	* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`.  Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update.  The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations.  There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods.  Not all keys used for all signing methods have a single standard on-disk representation.  Requiring `[]byte` as the type for all keys proved too limiting.  Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys.  Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+	* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+	* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+	* `KeyFunc` now returns `interface{}` instead of `[]byte`
+	* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+	* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`.  Specific sizes are now just instances of this type.
+    * Added public package global `SigningMethodHS256`
+    * Added public package global `SigningMethodHS384`
+    * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`.  Specific sizes are now just instances of this type.
+    * Added public package global `SigningMethodRS256`
+    * Added public package global `SigningMethodRS384`
+    * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk.  Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.  No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods

+ 273 - 0
vendor/github.com/golang-jwt/jwt/v4/claims.go

@@ -0,0 +1,273 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// Claims must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+	Valid() error
+}
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+	// the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+	Issuer string `json:"iss,omitempty"`
+
+	// the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+	Subject string `json:"sub,omitempty"`
+
+	// the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+	Audience ClaimStrings `json:"aud,omitempty"`
+
+	// the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+	ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+	// the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+	NotBefore *NumericDate `json:"nbf,omitempty"`
+
+	// the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+	IssuedAt *NumericDate `json:"iat,omitempty"`
+
+	// the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+	ID string `json:"jti,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c RegisteredClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if !c.VerifyExpiresAt(now, false) {
+		delta := now.Sub(c.ExpiresAt.Time)
+		vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !c.VerifyIssuedAt(now, false) {
+		vErr.Inner = ErrTokenUsedBeforeIssued
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !c.VerifyNotBefore(now, false) {
+		vErr.Inner = ErrTokenNotValidYet
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud(c.Audience, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool {
+	if c.ExpiresAt == nil {
+		return verifyExp(nil, cmp, req)
+	}
+
+	return verifyExp(&c.ExpiresAt.Time, cmp, req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool {
+	if c.IssuedAt == nil {
+		return verifyIat(nil, cmp, req)
+	}
+
+	return verifyIat(&c.IssuedAt.Time, cmp, req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool {
+	if c.NotBefore == nil {
+		return verifyNbf(nil, cmp, req)
+	}
+
+	return verifyNbf(&c.NotBefore.Time, cmp, req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool {
+	return verifyIss(c.Issuer, cmp, req)
+}
+
+// StandardClaims are a structured version of the JWT Claims Set, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the
+// specification exactly, since they were based on an earlier draft of the
+// specification and not updated. The main difference is that they only
+// support integer-based date fields and singular audiences. This might lead to
+// incompatibilities with other JWT implementations. The use of this is discouraged; instead
+// the newer RegisteredClaims struct should be used.
+//
+// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct.
+type StandardClaims struct {
+	Audience  string `json:"aud,omitempty"`
+	ExpiresAt int64  `json:"exp,omitempty"`
+	Id        string `json:"jti,omitempty"`
+	IssuedAt  int64  `json:"iat,omitempty"`
+	Issuer    string `json:"iss,omitempty"`
+	NotBefore int64  `json:"nbf,omitempty"`
+	Subject   string `json:"sub,omitempty"`
+}
+
+// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if !c.VerifyExpiresAt(now, false) {
+		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+		vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !c.VerifyIssuedAt(now, false) {
+		vErr.Inner = ErrTokenUsedBeforeIssued
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !c.VerifyNotBefore(now, false) {
+		vErr.Inner = ErrTokenNotValidYet
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud([]string{c.Audience}, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset.
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	if c.ExpiresAt == 0 {
+		return verifyExp(nil, time.Unix(cmp, 0), req)
+	}
+
+	t := time.Unix(c.ExpiresAt, 0)
+	return verifyExp(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true, if iat is unset.
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	if c.IssuedAt == 0 {
+		return verifyIat(nil, time.Unix(cmp, 0), req)
+	}
+
+	t := time.Unix(c.IssuedAt, 0)
+	return verifyIat(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true, if nbf is unset.
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	if c.NotBefore == 0 {
+		return verifyNbf(nil, time.Unix(cmp, 0), req)
+	}
+
+	t := time.Unix(c.NotBefore, 0)
+	return verifyNbf(&t, time.Unix(cmp, 0), req)
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+	return verifyIss(c.Issuer, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud []string, cmp string, required bool) bool {
+	if len(aud) == 0 {
+		return !required
+	}
+	// use a var here to keep constant time compare when looping over a number of claims
+	result := false
+
+	var stringClaims string
+	for _, a := range aud {
+		if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+			result = true
+		}
+		stringClaims = stringClaims + a
+	}
+
+	// case where "" is sent in one or many aud claims
+	if len(stringClaims) == 0 {
+		return !required
+	}
+
+	return result
+}
+
+func verifyExp(exp *time.Time, now time.Time, required bool) bool {
+	if exp == nil {
+		return !required
+	}
+	return now.Before(*exp)
+}
+
+func verifyIat(iat *time.Time, now time.Time, required bool) bool {
+	if iat == nil {
+		return !required
+	}
+	return now.After(*iat) || now.Equal(*iat)
+}
+
+func verifyNbf(nbf *time.Time, now time.Time, required bool) bool {
+	if nbf == nil {
+		return !required
+	}
+	return now.After(*nbf) || now.Equal(*nbf)
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+	if iss == "" {
+		return !required
+	}
+	if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+		return true
+	} else {
+		return false
+	}
+}

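A short sketch exercising the verification helpers defined above directly, without constructing a token; the claim values are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	claims := jwt.RegisteredClaims{
		Issuer:    "example-issuer",
		Audience:  jwt.ClaimStrings{"example-audience"},
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute)),
	}

	now := time.Now()
	fmt.Println(claims.VerifyAudience("example-audience", true)) // true
	fmt.Println(claims.VerifyExpiresAt(now, true))               // true while not yet expired
	fmt.Println(claims.VerifyIssuer("someone-else", true))       // false

	// Valid only checks exp, iat and nbf; unset claims are not required by default.
	fmt.Println(claims.Valid() == nil) // true
}
```
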
+ 4 - 0
vendor/github.com/golang-jwt/jwt/v4/doc.go

@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt

+ 142 - 0
vendor/github.com/golang-jwt/jwt/v4/ecdsa.go

@@ -0,0 +1,142 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+		return nil
+	}
+
+	return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes += 1
+		}
+
+		// We serialize the outputs (r and s) into big-endian byte arrays
+		// padded with zeros on the left to make sure the sizes work out.
+		// Output must be 2*keyBytes long.
+		out := make([]byte, 2*keyBytes)
+		r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+		s.FillBytes(out[keyBytes:])  // s is assigned to the second half of output.
+
+		return EncodeSegment(out), nil
+	} else {
+		return "", err
+	}
+}
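
A minimal usage sketch (not part of the vendored file above) showing how the ES256 method registered here is typically exercised; the generated P-256 key and the "sub" claim are illustrative only.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// ES256 requires a P-256 key; KeySize and CurveBits above enforce the sizes.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign with the private key, verify with the matching public key.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwt.MapClaims{"sub": "demo"}).SignedString(priv)
	if err != nil {
		panic(err)
	}
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	}, jwt.WithValidMethods([]string{"ES256"}))
	fmt.Println(tok.Valid, err) // true <nil>
}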

+ 69 - 0
vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go

@@ -0,0 +1,69 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrNotECPublicKey  = errors.New("key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}

+ 85 - 0
vendor/github.com/golang-jwt/jwt/v4/ed25519.go

@@ -0,0 +1,85 @@
+package jwt
+
+import (
+	"errors"
+
+	"crypto"
+	"crypto/ed25519"
+	"crypto/rand"
+)
+
+var (
+	ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+	SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+	SigningMethodEdDSA = &SigningMethodEd25519{}
+	RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+		return SigningMethodEdDSA
+	})
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+	return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+	var err error
+	var ed25519Key ed25519.PublicKey
+	var ok bool
+
+	if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	if len(ed25519Key) != ed25519.PublicKeySize {
+		return ErrInvalidKey
+	}
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Verify the signature
+	if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+		return ErrEd25519Verification
+	}
+
+	return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+	var ed25519Key crypto.Signer
+	var ok bool
+
+	if ed25519Key, ok = key.(crypto.Signer); !ok {
+		return "", ErrInvalidKeyType
+	}
+
+	if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Sign the string and return the encoded result
+	// ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0)
+	sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+	if err != nil {
+		return "", err
+	}
+	return EncodeSegment(sig), nil
+}
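
A short, hedged sketch (not part of the vendored file) of how the EdDSA method above is typically used; the freshly generated key pair is illustrative only.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	// Sign expects a crypto.Signer backed by an Ed25519 key; Verify expects an ed25519.PublicKey.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{"sub": "demo"}).SignedString(priv)
	if err != nil {
		panic(err)
	}
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return pub, nil })
	fmt.Println(tok.Valid, err) // true <nil>
}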

+ 64 - 0
vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go

@@ -0,0 +1,64 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+	ErrNotEdPublicKey  = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey ed25519.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+		return nil, ErrNotEdPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		return nil, err
+	}
+
+	var pkey ed25519.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+		return nil, ErrNotEdPublicKey
+	}
+
+	return pkey, nil
+}

+ 112 - 0
vendor/github.com/golang-jwt/jwt/v4/errors.go

@@ -0,0 +1,112 @@
+package jwt
+
+import (
+	"errors"
+)
+
+// Error constants
+var (
+	ErrInvalidKey      = errors.New("key is invalid")
+	ErrInvalidKeyType  = errors.New("key is of invalid type")
+	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+	ErrTokenMalformed        = errors.New("token is malformed")
+	ErrTokenUnverifiable     = errors.New("token is unverifiable")
+	ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
+	ErrTokenInvalidAudience  = errors.New("token has invalid audience")
+	ErrTokenExpired          = errors.New("token is expired")
+	ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+	ErrTokenInvalidIssuer    = errors.New("token has invalid issuer")
+	ErrTokenNotValidYet      = errors.New("token is not valid yet")
+	ErrTokenInvalidId        = errors.New("token has invalid id")
+	ErrTokenInvalidClaims    = errors.New("token has invalid claims")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
+	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
+	ValidationErrorSignatureInvalid                    // Signature validation failed
+
+	// Standard Claim validation errors
+	ValidationErrorAudience      // AUD validation failed
+	ValidationErrorExpired       // EXP validation failed
+	ValidationErrorIssuedAt      // IAT validation failed
+	ValidationErrorIssuer        // ISS validation failed
+	ValidationErrorNotValidYet   // NBF validation failed
+	ValidationErrorId            // JTI validation failed
+	ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+	return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// ValidationError represents an error from Parse if token is not valid
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield.  see ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// Error is the implementation of the error interface.
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// Unwrap gives errors.Is and errors.As access to the inner error.
+func (e *ValidationError) Unwrap() error {
+	return e.Inner
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
+
+// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message
+// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use
+// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
+func (e *ValidationError) Is(err error) bool {
+	// Check, if our inner error is a direct match
+	if errors.Is(errors.Unwrap(e), err) {
+		return true
+	}
+
+	// Otherwise, we need to match using our error flags
+	switch err {
+	case ErrTokenMalformed:
+		return e.Errors&ValidationErrorMalformed != 0
+	case ErrTokenUnverifiable:
+		return e.Errors&ValidationErrorUnverifiable != 0
+	case ErrTokenSignatureInvalid:
+		return e.Errors&ValidationErrorSignatureInvalid != 0
+	case ErrTokenInvalidAudience:
+		return e.Errors&ValidationErrorAudience != 0
+	case ErrTokenExpired:
+		return e.Errors&ValidationErrorExpired != 0
+	case ErrTokenUsedBeforeIssued:
+		return e.Errors&ValidationErrorIssuedAt != 0
+	case ErrTokenInvalidIssuer:
+		return e.Errors&ValidationErrorIssuer != 0
+	case ErrTokenNotValidYet:
+		return e.Errors&ValidationErrorNotValidYet != 0
+	case ErrTokenInvalidId:
+		return e.Errors&ValidationErrorId != 0
+	case ErrTokenInvalidClaims:
+		return e.Errors&ValidationErrorClaimsInvalid != 0
+	}
+
+	return false
+}
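
A small sketch (not from the vendored code) of how the sentinel errors and ValidationError.Is above combine with errors.Is; the shared secret and claims are placeholders.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("secret") // placeholder secret

	// Build a deliberately expired HS256 token.
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"exp": time.Now().Add(-time.Hour).Unix(),
	}).SignedString(key)

	_, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return key, nil })

	// ValidationError.Is maps the ValidationErrorExpired flag onto ErrTokenExpired,
	// so callers can match on the exported sentinel errors.
	fmt.Println(errors.Is(err, jwt.ErrTokenExpired)) // true
}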

+ 95 - 0
vendor/github.com/golang-jwt/jwt/v4/hmac.go

@@ -0,0 +1,95 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors.  Signature is good.
+	return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKeyType
+}
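
A hedged usage sketch (not part of the vendored file) for the HMAC methods above; the shared secret is a placeholder, and the keyfunc type check mirrors the []byte expectation documented on SigningMethodHMAC.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("a-shared-secret") // placeholder; use a strong random secret in practice

	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(key)
	if err != nil {
		panic(err)
	}

	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// Reject anything that is not HMAC before handing out the shared secret.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
		}
		return key, nil
	})
	fmt.Println(tok.Valid, err) // true <nil>
}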

+ 151 - 0
vendor/github.com/golang-jwt/jwt/v4/map_claims.go

@@ -0,0 +1,151 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+	"time"
+	// "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding.
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	var aud []string
+	switch v := m["aud"].(type) {
+	case string:
+		aud = append(aud, v)
+	case []string:
+		aud = v
+	case []interface{}:
+		for _, a := range v {
+			vs, ok := a.(string)
+			if !ok {
+				return false
+			}
+			aud = append(aud, vs)
+		}
+	}
+	return verifyAud(aud, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp).
+// If req is false, it will return true if exp is unset.
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["exp"]
+	if !ok {
+		return !req
+	}
+
+	switch exp := v.(type) {
+	case float64:
+		if exp == 0 {
+			return verifyExp(nil, cmpTime, req)
+		}
+
+		return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req)
+	case json.Number:
+		v, _ := exp.Float64()
+
+		return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true if iat is unset.
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["iat"]
+	if !ok {
+		return !req
+	}
+
+	switch iat := v.(type) {
+	case float64:
+		if iat == 0 {
+			return verifyIat(nil, cmpTime, req)
+		}
+
+		return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req)
+	case json.Number:
+		v, _ := iat.Float64()
+
+		return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true if nbf is unset.
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["nbf"]
+	if !ok {
+		return !req
+	}
+
+	switch nbf := v.(type) {
+	case float64:
+		if nbf == 0 {
+			return verifyNbf(nil, cmpTime, req)
+		}
+
+		return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req)
+	case json.Number:
+		v, _ := nbf.Float64()
+
+		return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Valid validates the time-based claims "exp", "iat" and "nbf".
+// There is no accounting for clock skew.
+// If any of the above claims are not present in the token, the claims are
+// still considered valid.
+func (m MapClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	if !m.VerifyExpiresAt(now, false) {
+		// TODO(oxisto): this should be replaced with ErrTokenExpired
+		vErr.Inner = errors.New("Token is expired")
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !m.VerifyIssuedAt(now, false) {
+		// TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
+		vErr.Inner = errors.New("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !m.VerifyNotBefore(now, false) {
+		// TODO(oxisto): this should be replaced with ErrTokenNotValidYet
+		vErr.Inner = errors.New("Token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
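
A brief sketch (not part of the vendored file) exercising the MapClaims verification helpers defined above; the issuer and lifetimes are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	now := time.Now()
	claims := jwt.MapClaims{
		"iss": "issuer.example", // hypothetical issuer
		"exp": float64(now.Add(time.Hour).Unix()),
		"nbf": float64(now.Unix()),
	}

	fmt.Println(claims.VerifyIssuer("issuer.example", true)) // true
	fmt.Println(claims.VerifyExpiresAt(now.Unix(), true))    // true while not expired
	fmt.Println(claims.Valid())                              // <nil>: exp, iat and nbf all pass
}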

+ 52 - 0
vendor/github.com/golang-jwt/jwt/v4/none.go

@@ -0,0 +1,52 @@
+package jwt
+
+// SigningMethodNone implements the none signing method.  This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+	SigningMethodNone = &signingMethodNone{}
+	NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+	RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+		return SigningMethodNone
+	})
+}
+
+func (m *signingMethodNone) Alg() string {
+	return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+	// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+	// accepting 'none' signing method
+	if _, ok := key.(unsafeNoneMagicConstant); !ok {
+		return NoneSignatureTypeDisallowedError
+	}
+	// If signing method is none, signature must be an empty string
+	if signature != "" {
+		return NewValidationError(
+			"'none' signing method with non-empty signature",
+			ValidationErrorSignatureInvalid,
+		)
+	}
+
+	// Accept 'none' signing method.
+	return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+	if _, ok := key.(unsafeNoneMagicConstant); ok {
+		return "", nil
+	}
+	return "", NoneSignatureTypeDisallowedError
+}

+ 170 - 0
vendor/github.com/golang-jwt/jwt/v4/parser.go

@@ -0,0 +1,170 @@
+package jwt
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type Parser struct {
+	// If populated, only these methods will be considered valid.
+	//
+	// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+	ValidMethods []string
+
+	// Use JSON Number format in JSON decoder.
+	//
+	// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+	UseJSONNumber bool
+
+	// Skip claims validation during token parsing.
+	//
+	// Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead.
+	SkipClaimsValidation bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+	p := &Parser{}
+
+	// loop through our parsing options and apply them
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for validating.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	token, parts, err := p.ParseUnverified(tokenString, claims)
+	if err != nil {
+		return token, err
+	}
+
+	// Verify signing method is in the required set
+	if p.ValidMethods != nil {
+		var signingMethodValid = false
+		var alg = token.Method.Alg()
+		for _, m := range p.ValidMethods {
+			if m == alg {
+				signingMethodValid = true
+				break
+			}
+		}
+		if !signingMethodValid {
+			// signing method is not in the listed set
+			return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+		}
+	}
+
+	// Lookup key
+	var key interface{}
+	if keyFunc == nil {
+		// keyFunc was not provided.  short circuiting validation
+		return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		if ve, ok := err.(*ValidationError); ok {
+			return token, ve
+		}
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+	}
+
+	vErr := &ValidationError{}
+
+	// Validate Claims
+	if !p.SkipClaimsValidation {
+		if err := token.Claims.Valid(); err != nil {
+
+			// If the Claims Valid returned an error, check if it is a validation error,
+			// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+			if e, ok := err.(*ValidationError); !ok {
+				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+			} else {
+				vErr = e
+			}
+		}
+	}
+
+	// Perform validation
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr.Inner = err
+		vErr.Errors |= ValidationErrorSignatureInvalid
+	}
+
+	if vErr.valid() {
+		token.Valid = true
+		return token, nil
+	}
+
+	return token, vErr
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+	parts = strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+	}
+
+	token = &Token{Raw: tokenString}
+
+	// parse Header
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+		}
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// parse Claims
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+	if p.UseJSONNumber {
+		dec.UseNumber()
+	}
+	// JSON Decode.  Special case for map type to avoid weird pointer behavior
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = dec.Decode(&c)
+	} else {
+		err = dec.Decode(&claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+		}
+	} else {
+		return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+	}
+
+	return token, parts, nil
+}
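
A minimal sketch (not from the vendored code) of driving the Parser above through its functional options; the HS256 secret is a placeholder.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("secret") // placeholder secret
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(key)

	p := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}), // any other alg fails with ValidationErrorSignatureInvalid
		jwt.WithJSONNumber(),                    // numeric claims decode as json.Number instead of float64
	)

	claims := jwt.MapClaims{}
	tok, err := p.ParseWithClaims(signed, claims, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	fmt.Println(tok.Valid, claims["sub"], err) // true demo <nil>
}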

+ 29 - 0
vendor/github.com/golang-jwt/jwt/v4/parser_option.go

@@ -0,0 +1,29 @@
+package jwt
+
+// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add
+// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that
+// takes a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid.
+// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+	return func(p *Parser) {
+		p.ValidMethods = methods
+	}
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber
+func WithJSONNumber() ParserOption {
+	return func(p *Parser) {
+		p.UseJSONNumber = true
+	}
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you
+// know exactly what you are doing.
+func WithoutClaimsValidation() ParserOption {
+	return func(p *Parser) {
+		p.SkipClaimsValidation = true
+	}
+}

+ 101 - 0
vendor/github.com/golang-jwt/jwt/v4/rsa.go

@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements token verification for the SigningMethod
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}

+ 143 - 0
vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go

@@ -0,0 +1,143 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+	// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+	// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+	// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+	// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+	VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	opts := m.Options
+	if m.VerifyOptions != nil {
+		opts = m.VerifyOptions
+	}
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+
+	switch k := key.(type) {
+	case *rsa.PrivateKey:
+		rsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}

+ 105 - 0
vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go

@@ -0,0 +1,105 @@
+package jwt
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+	ErrNotRSAPrivateKey    = errors.New("key is not a valid RSA private key")
+	ErrNotRSAPublicKey     = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with a password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated because RFC 1423 is regarded as insecure by design. Unfortunately, there is no
+// alternative in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+
+	var blockDecrypted []byte
+	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+		return nil, err
+	}
+
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, ErrNotRSAPublicKey
+	}
+
+	return pkey, nil
+}
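
A hedged round-trip sketch (not part of the vendored file) for the PEM helpers above: a throwaway key is marshalled to PKCS8 PEM and parsed back before signing.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(rsaKey)
	if err != nil {
		panic(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

	// ParseRSAPrivateKeyFromPEM accepts both PKCS1 and PKCS8 blocks, as shown above.
	priv, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	signed, err := jwt.New(jwt.SigningMethodRS256).SignedString(priv)
	fmt.Println(len(signed) > 0, err) // true <nil>
}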

+ 46 - 0
vendor/github.com/golang-jwt/jwt/v4/signing_method.go

@@ -0,0 +1,46 @@
+package jwt
+
+import (
+	"sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
+	Alg() string                                                   // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+	signingMethodLock.Lock()
+	defer signingMethodLock.Unlock()
+
+	signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	if methodF, ok := signingMethods[alg]; ok {
+		method = methodF()
+	}
+	return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	for alg := range signingMethods {
+		algs = append(algs, alg)
+	}
+	return
+}
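
A tiny sketch (not from the vendored code) of the registry API above; every method file in this package registers itself in init(), so importing the package is enough to populate it.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	fmt.Println(jwt.GetAlgorithms()) // e.g. [ES256 HS256 RS256 ... EdDSA none] (unordered)

	m := jwt.GetSigningMethod("HS256")
	fmt.Println(m.Alg()) // HS256
}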

+ 1 - 0
vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf

@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]

+ 127 - 0
vendor/github.com/golang-jwt/jwt/v4/token.go

@@ -0,0 +1,127 @@
+package jwt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// DecodePaddingAllowed will switch the codec used for decoding JWTs to one that accepts padding. Note that the JWS
+// RFC 7515 states that tokens must use base64url encoding with no padding. Unfortunately, some implementations
+// of JWT produce non-standard tokens, which therefore require support for decoding. Note that this is a global
+// variable, so updating it will change the behavior on a package level, and it is NOT goroutine safe.
+// To use the non-recommended decoding, set this boolean to `true` prior to using this package.
+var DecodePaddingAllowed bool
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value.  This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification.  The function receives the parsed,
+// but unverified Token.  This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token.  Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+	Raw       string                 // The raw token.  Populated when you Parse a token
+	Method    SigningMethod          // The signing method used or to be used
+	Header    map[string]interface{} // The first segment of the token
+	Claims    Claims                 // The second segment of the token
+	Signature string                 // The third segment of the token.  Populated when you Parse a token
+	Valid     bool                   // Is the token valid?  Populated when you Parse/Verify a token
+}
+
+// New creates a new Token with the specified signing method and an empty map of claims.
+func New(method SigningMethod) *Token {
+	return NewWithClaims(method, MapClaims{})
+}
+
+// NewWithClaims creates a new Token with the specified signing method and claims.
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+	return &Token{
+		Header: map[string]interface{}{
+			"typ": "JWT",
+			"alg": method.Alg(),
+		},
+		Claims: claims,
+		Method: method,
+	}
+}
+
+// SignedString creates and returns a complete, signed JWT.
+// The token is signed using the SigningMethod specified in the token.
+func (t *Token) SignedString(key interface{}) (string, error) {
+	var sig, sstr string
+	var err error
+	if sstr, err = t.SigningString(); err != nil {
+		return "", err
+	}
+	if sig, err = t.Method.Sign(sstr, key); err != nil {
+		return "", err
+	}
+	return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// SigningString generates the signing string.  This is the
+// most expensive part of the whole deal.  Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+	var err error
+	var jsonValue []byte
+
+	if jsonValue, err = json.Marshal(t.Header); err != nil {
+		return "", err
+	}
+	header := EncodeSegment(jsonValue)
+
+	if jsonValue, err = json.Marshal(t.Claims); err != nil {
+		return "", err
+	}
+	claim := EncodeSegment(jsonValue)
+
+	return strings.Join([]string{header, claim}, "."), nil
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature.
+// The caller is strongly encouraged to set the WithValidMethods option to
+// validate the 'alg' claim in the token matches the expected algorithm.
+// For more details about the importance of validating the 'alg' claim,
+// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+	return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+	return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a segment using the JWT-specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func EncodeSegment(seg []byte) string {
+	return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment decodes a segment encoded with the JWT-specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func DecodeSegment(seg string) ([]byte, error) {
+	if DecodePaddingAllowed {
+		if l := len(seg) % 4; l > 0 {
+			seg += strings.Repeat("=", 4-l)
+		}
+		return base64.URLEncoding.DecodeString(seg)
+	}
+
+	return base64.RawURLEncoding.DecodeString(seg)
+}
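
A short sketch (not part of the vendored file) of the token structure described above: SignedString produces three base64url segments, and DecodeSegment recovers the header; the secret is a placeholder.

package main

import (
	"fmt"
	"strings"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"})
	signed, err := tok.SignedString([]byte("secret")) // placeholder secret
	if err != nil {
		panic(err)
	}

	// header.claims.signature
	parts := strings.Split(signed, ".")
	headerJSON, _ := jwt.DecodeSegment(parts[0])
	fmt.Println(string(headerJSON)) // {"alg":"HS256","typ":"JWT"}
}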

+ 145 - 0
vendor/github.com/golang-jwt/jwt/v4/types.go

@@ -0,0 +1,145 @@
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// TimePrecision sets the precision of times and dates within this library.
+// This has an influence on the precision of times when comparing expiry or
+// other related time fields. Furthermore, it is also the precision of times
+// when serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially
+// its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the behaviour
+// of the underlying []string. If it is set to false, it will serialize to a single
+// string, if it contains one element. Otherwise, it will serialize to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+	time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+	return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+	round, frac := math.Modf(f)
+	return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+	var prec int
+	if TimePrecision < time.Second {
+		prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+	}
+	truncatedDate := date.Truncate(TimePrecision)
+
+	// For very large timestamps, UnixNano would overflow an int64, but this
+	// function requires nanosecond level precision, so we have to use the
+	// following technique to get round the issue:
+	// 1. Take the normal unix timestamp to form the whole number part of the
+	//    output,
+	// 2. Take the result of the Nanosecond function, which returns the offset
+	//    within the second of the particular unix time instance, to form the
+	//    decimal part of the output
+	// 3. Concatenate them to produce the final result
+	seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+	nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+	output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+	return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.Unmarshaler interface and deserializes a
+// NumericDate from a JSON representation, i.e. a json.Number. This number represents a UNIX epoch
+// with either integer or non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+	var (
+		number json.Number
+		f      float64
+	)
+
+	if err = json.Unmarshal(b, &number); err != nil {
+		return fmt.Errorf("could not parse NumericDate: %w", err)
+	}
+
+	if f, err = number.Float64(); err != nil {
+		return fmt.Errorf("could not convert json number value to float: %w", err)
+	}
+
+	n := newNumericDateFromSeconds(f)
+	*date = *n
+
+	return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be unmarshalled from either a string array or a single string.
+// This type is necessary, since the "aud" claim can be either a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+	var value interface{}
+
+	if err = json.Unmarshal(data, &value); err != nil {
+		return err
+	}
+
+	var aud []string
+
+	switch v := value.(type) {
+	case string:
+		aud = append(aud, v)
+	case []string:
+		aud = ClaimStrings(v)
+	case []interface{}:
+		for _, vv := range v {
+			vs, ok := vv.(string)
+			if !ok {
+				return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+			}
+			aud = append(aud, vs)
+		}
+	case nil:
+		return nil
+	default:
+		return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+	}
+
+	*s = aud
+
+	return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+	// This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field,
+	// only contains one element, it MAY be serialized as a single string. This may or may not be
+	// desired based on the ecosystem of other JWT libraries used, so we make it configurable by the
+	// variable MarshalSingleStringAsArray.
+	if len(s) == 1 && !MarshalSingleStringAsArray {
+		return json.Marshal(s[0])
+	}
+
+	return json.Marshal([]string(s))
+}
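
A brief sketch (not from the vendored code) of the package-level knobs defined above; the timestamp and audience values are arbitrary, and both settings are global, so real code should set them once at startup.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Opt in to fractional timestamps; the default TimePrecision of one second keeps them integral.
	jwt.TimePrecision = time.Millisecond
	d := jwt.NewNumericDate(time.Unix(1700000000, 123456789))
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // 1700000000.123

	// A single-element ClaimStrings serializes as an array unless MarshalSingleStringAsArray is disabled.
	aud := jwt.ClaimStrings{"service-a"}
	a1, _ := json.Marshal(aud)
	jwt.MarshalSingleStringAsArray = false
	a2, _ := json.Marshal(aud)
	fmt.Println(string(a1), string(a2)) // ["service-a"] "service-a"
}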

+ 384 - 0
vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go

@@ -0,0 +1,384 @@
+package gha
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/compression"
+	"github.com/moby/buildkit/util/progress"
+	"github.com/moby/buildkit/util/tracing"
+	"github.com/moby/buildkit/worker"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	actionscache "github.com/tonistiigi/go-actions-cache"
+	"golang.org/x/sync/errgroup"
+)
+
+func init() {
+	actionscache.Log = logrus.Debugf
+}
+
+const (
+	attrScope = "scope"
+	attrToken = "token"
+	attrURL   = "url"
+	version   = "1"
+)
+
+type Config struct {
+	Scope string
+	URL   string
+	Token string
+}
+
+func getConfig(attrs map[string]string) (*Config, error) {
+	scope, ok := attrs[attrScope]
+	if !ok {
+		scope = "buildkit"
+	}
+	url, ok := attrs[attrURL]
+	if !ok {
+		return nil, errors.Errorf("url not set for github actions cache")
+	}
+	token, ok := attrs[attrToken]
+	if !ok {
+		return nil, errors.Errorf("token not set for github actions cache")
+	}
+	return &Config{
+		Scope: scope,
+		URL:   url,
+		Token: token,
+	}, nil
+}
+
+// ResolveCacheExporterFunc for GitHub Actions cache exporter.
+func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
+	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
+		cfg, err := getConfig(attrs)
+		if err != nil {
+			return nil, err
+		}
+		return NewExporter(cfg)
+	}
+}
+
+type exporter struct {
+	solver.CacheExporterTarget
+	chains *v1.CacheChains
+	cache  *actionscache.Cache
+	config *Config
+}
+
+func NewExporter(c *Config) (remotecache.Exporter, error) {
+	cc := v1.NewCacheChains()
+	cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{Client: tracing.DefaultClient})
+	if err != nil {
+		return nil, err
+	}
+	return &exporter{CacheExporterTarget: cc, chains: cc, cache: cache, config: c}, nil
+}
+
+func (*exporter) Name() string {
+	return "exporting to GitHub cache"
+}
+
+func (ce *exporter) Config() remotecache.Config {
+	return remotecache.Config{
+		Compression: compression.New(compression.Default),
+	}
+}
+
+func (ce *exporter) blobKey(dgst digest.Digest) string {
+	return "buildkit-blob-" + version + "-" + dgst.String()
+}
+
+func (ce *exporter) indexKey() string {
+	scope := ""
+	for _, s := range ce.cache.Scopes() {
+		if s.Permission&actionscache.PermissionWrite != 0 {
+			scope = s.Scope
+		}
+	}
+	scope = digest.FromBytes([]byte(scope)).Hex()[:8]
+	return "index-" + ce.config.Scope + "-" + version + "-" + scope
+}
+
+func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
+	// res := make(map[string]string)
+	config, descs, err := ce.chains.Marshal(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: push parallel
+	for i, l := range config.Layers {
+		dgstPair, ok := descs[l.Blob]
+		if !ok {
+			return nil, errors.Errorf("missing blob %s", l.Blob)
+		}
+		if dgstPair.Descriptor.Annotations == nil {
+			return nil, errors.Errorf("invalid descriptor without annotations")
+		}
+		var diffID digest.Digest
+		v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"]
+		if !ok {
+			return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
+		}
+		dgst, err := digest.Parse(v)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse uncompressed annotation")
+		}
+		diffID = dgst
+
+		key := ce.blobKey(dgstPair.Descriptor.Digest)
+		b, err := ce.cache.Load(ctx, key)
+		if err != nil {
+			return nil, err
+		}
+		if b == nil {
+			layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+			ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor)
+			if err != nil {
+				return nil, layerDone(err)
+			}
+			if err := ce.cache.Save(ctx, key, ra); err != nil {
+				if !errors.Is(err, os.ErrExist) {
+					return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
+				}
+			}
+			layerDone(nil)
+		}
+		la := &v1.LayerAnnotations{
+			DiffID:    diffID,
+			Size:      dgstPair.Descriptor.Size,
+			MediaType: dgstPair.Descriptor.MediaType,
+		}
+		if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok {
+			var t time.Time
+			if err := (&t).UnmarshalText([]byte(v)); err != nil {
+				return nil, err
+			}
+			la.CreatedAt = t.UTC()
+		}
+		config.Layers[i].Annotations = la
+	}
+
+	dt, err := json.Marshal(config)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ce.cache.SaveMutable(ctx, ce.indexKey(), 15*time.Second, func(old *actionscache.Entry) (actionscache.Blob, error) {
+		return actionscache.NewBlob(dt), nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+// ResolveCacheImporterFunc for GitHub Actions cache importer.
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) {
+		cfg, err := getConfig(attrs)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, err
+		}
+		i, err := NewImporter(cfg)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, err
+		}
+		return i, ocispecs.Descriptor{}, nil
+	}
+}
+
+type importer struct {
+	cache  *actionscache.Cache
+	config *Config
+}
+
+func NewImporter(c *Config) (remotecache.Importer, error) {
+	cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{Client: tracing.DefaultClient})
+	if err != nil {
+		return nil, err
+	}
+	return &importer{cache: cache, config: c}, nil
+}
+
+func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) {
+	if l.Annotations == nil {
+		return nil, errors.Errorf("cache layer with missing annotations")
+	}
+	annotations := map[string]string{}
+	if l.Annotations.DiffID == "" {
+		return nil, errors.Errorf("cache layer with missing diffid")
+	}
+	annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+	if !l.Annotations.CreatedAt.IsZero() {
+		txt, err := l.Annotations.CreatedAt.MarshalText()
+		if err != nil {
+			return nil, err
+		}
+		annotations["buildkit/createdat"] = string(txt)
+	}
+	desc := ocispecs.Descriptor{
+		MediaType:   l.Annotations.MediaType,
+		Digest:      l.Blob,
+		Size:        l.Annotations.Size,
+		Annotations: annotations,
+	}
+	return &v1.DescriptorProviderPair{
+		Descriptor: desc,
+		Provider:   &ciProvider{desc: desc, ci: ci},
+	}, nil
+}
+
+func (ci *importer) loadScope(ctx context.Context, scope string) (*v1.CacheChains, error) {
+	scope = digest.FromBytes([]byte(scope)).Hex()[:8]
+	key := "index-" + ci.config.Scope + "-" + version + "-" + scope
+
+	entry, err := ci.cache.Load(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return v1.NewCacheChains(), nil
+	}
+
+	// TODO: this buffer can be removed
+	buf := &bytes.Buffer{}
+	if err := entry.WriteTo(ctx, buf); err != nil {
+		return nil, err
+	}
+
+	var config v1.CacheConfig
+	if err := json.Unmarshal(buf.Bytes(), &config); err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	allLayers := v1.DescriptorProvider{}
+
+	for _, l := range config.Layers {
+		dpp, err := ci.makeDescriptorProviderPair(l)
+		if err != nil {
+			return nil, err
+		}
+		allLayers[l.Blob] = *dpp
+	}
+
+	cc := v1.NewCacheChains()
+	if err := v1.ParseConfig(config, allLayers, cc); err != nil {
+		return nil, err
+	}
+	return cc, nil
+}
+
+func (ci *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+	eg, ctx := errgroup.WithContext(ctx)
+	ccs := make([]*v1.CacheChains, len(ci.cache.Scopes()))
+
+	for i, s := range ci.cache.Scopes() {
+		func(i int, scope string) {
+			eg.Go(func() error {
+				cc, err := ci.loadScope(ctx, scope)
+				if err != nil {
+					return err
+				}
+				ccs[i] = cc
+				return nil
+			})
+		}(i, s.Scope)
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	cms := make([]solver.CacheManager, 0, len(ccs))
+
+	for _, cc := range ccs {
+		keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+		if err != nil {
+			return nil, err
+		}
+		cms = append(cms, solver.NewCacheManager(ctx, id, keysStorage, resultStorage))
+	}
+
+	return solver.NewCombinedCacheManager(cms, nil), nil
+}
+
+type ciProvider struct {
+	ci      *importer
+	desc    ocispecs.Descriptor
+	mu      sync.Mutex
+	entries map[digest.Digest]*actionscache.Entry
+}
+
+func (p *ciProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error {
+	if desc.Digest != p.desc.Digest {
+		return nil
+	}
+
+	_, err := p.loadEntry(ctx, desc)
+	return err
+}
+
+func (p *ciProvider) loadEntry(ctx context.Context, desc ocispecs.Descriptor) (*actionscache.Entry, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if ce, ok := p.entries[desc.Digest]; ok {
+		return ce, nil
+	}
+	key := "buildkit-blob-" + version + "-" + desc.Digest.String()
+	ce, err := p.ci.cache.Load(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if ce == nil {
+		return nil, errors.Errorf("blob %s not found", desc.Digest)
+	}
+	if p.entries == nil {
+		p.entries = make(map[digest.Digest]*actionscache.Entry)
+	}
+	p.entries[desc.Digest] = ce
+	return ce, nil
+}
+
+func (p *ciProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
+	ce, err := p.loadEntry(ctx, desc)
+	if err != nil {
+		return nil, err
+	}
+	rac := ce.Download(context.TODO())
+	return &readerAt{ReaderAtCloser: rac, desc: desc}, nil
+}
+
+type readerAt struct {
+	actionscache.ReaderAtCloser
+	desc ocispecs.Descriptor
+}
+
+func (r *readerAt) ReadAt(p []byte, off int64) (int, error) {
+	if off >= r.desc.Size {
+		return 0, io.EOF
+	}
+	return r.ReaderAtCloser.ReadAt(p, off)
+}
+
+func (r *readerAt) Size() int64 {
+	return r.desc.Size
+}
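
For reference, the cache index key built in `loadScope` above is fully deterministic: the GitHub Actions cache scope is hashed, truncated to eight hex characters, and namespaced with the configured `Scope` and the cache-format version. A minimal sketch of that derivation, where the `version` constant and the scope values are illustrative placeholders rather than values taken from this file:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Same derivation as loadScope: hash the Actions cache scope, keep the
	// first 8 hex characters, then namespace with the configured Scope and
	// the cache format version.
	const version = "1"            // illustrative placeholder
	const configScope = "buildkit" // illustrative placeholder for ci.config.Scope
	scope := "refs/heads/main"

	short := digest.FromBytes([]byte(scope)).Hex()[:8]
	key := "index-" + configScope + "-" + version + "-" + short
	fmt.Println(key) // e.g. index-buildkit-1-<8 hex chars>
}
```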

+ 461 - 0
vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go

@@ -0,0 +1,461 @@
+package containerdexecutor
+
+import (
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/moby/buildkit/util/bklog"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/mount"
+	containerdoci "github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/executor/oci"
+	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/network"
+	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+type containerdExecutor struct {
+	client           *containerd.Client
+	root             string
+	networkProviders map[pb.NetMode]network.Provider
+	cgroupParent     string
+	dnsConfig        *oci.DNSConfig
+	running          map[string]chan error
+	mu               sync.Mutex
+	apparmorProfile  string
+	selinux          bool
+	traceSocket      string
+	rootless         bool
+}
+
+// OnCreateRuntimer provides an alternative to OCI hooks for applying network
+// configuration to a container. If the [network.Provider] returns a
+// [network.Namespace] which also implements this interface, the containerd
+// executor will run the callback at the appropriate point in the container
+// lifecycle.
+type OnCreateRuntimer interface {
+	// OnCreateRuntime is analogous to the createRuntime OCI hook. The
+	// function is called after the container is created, before the user
+	// process has been executed. The argument is the container PID in the
+	// runtime namespace.
+	OnCreateRuntime(pid uint32) error
+}
+
+// New creates a new executor backed by connection to containerd API
+func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool) executor.Executor {
+	// clean up old hosts/resolv.conf file. ignore errors
+	os.RemoveAll(filepath.Join(root, "hosts"))
+	os.RemoveAll(filepath.Join(root, "resolv.conf"))
+
+	return &containerdExecutor{
+		client:           client,
+		root:             root,
+		networkProviders: networkProviders,
+		cgroupParent:     cgroup,
+		dnsConfig:        dnsConfig,
+		running:          make(map[string]chan error),
+		apparmorProfile:  apparmorProfile,
+		selinux:          selinux,
+		traceSocket:      traceSocket,
+		rootless:         rootless,
+	}
+}
+
+func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
+	if id == "" {
+		id = identity.NewID()
+	}
+
+	startedOnce := sync.Once{}
+	done := make(chan error, 1)
+	w.mu.Lock()
+	w.running[id] = done
+	w.mu.Unlock()
+	defer func() {
+		w.mu.Lock()
+		delete(w.running, id)
+		w.mu.Unlock()
+		done <- err
+		close(done)
+		if started != nil {
+			startedOnce.Do(func() {
+				close(started)
+			})
+		}
+	}()
+
+	meta := process.Meta
+
+	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig)
+	if err != nil {
+		return err
+	}
+
+	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
+	if err != nil {
+		return err
+	}
+	if clean != nil {
+		defer clean()
+	}
+
+	mountable, err := root.Src.Mount(ctx, false)
+	if err != nil {
+		return err
+	}
+
+	rootMounts, release, err := mountable.Mount()
+	if err != nil {
+		return err
+	}
+	if release != nil {
+		defer release()
+	}
+
+	lm := snapshot.LocalMounterWithMounts(rootMounts)
+	rootfsPath, err := lm.Mount()
+	if err != nil {
+		return err
+	}
+	defer lm.Unmount()
+	defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
+
+	uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
+	if err != nil {
+		return err
+	}
+
+	identity := idtools.Identity{
+		UID: int(uid),
+		GID: int(gid),
+	}
+
+	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
+	if err != nil {
+		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
+	}
+	if _, err := os.Stat(newp); err != nil {
+		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
+			return errors.Wrapf(err, "failed to create working directory %s", newp)
+		}
+	}
+
+	provider, ok := w.networkProviders[meta.NetMode]
+	if !ok {
+		return errors.Errorf("unknown network mode %s", meta.NetMode)
+	}
+	namespace, err := provider.New(ctx, meta.Hostname)
+	if err != nil {
+		return err
+	}
+	defer namespace.Close()
+
+	if meta.NetMode == pb.NetMode_HOST {
+		bklog.G(ctx).Info("enabling HostNetworking")
+	}
+
+	opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
+	if meta.ReadonlyRootFS {
+		opts = append(opts, containerdoci.WithRootFSReadonly())
+	}
+
+	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
+	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
+	if err != nil {
+		return err
+	}
+	defer cleanup()
+	spec.Process.Terminal = meta.Tty
+	if w.rootless {
+		if err := rootlessspecconv.ToRootless(spec); err != nil {
+			return err
+		}
+	}
+
+	container, err := w.client.NewContainer(ctx, id,
+		containerd.WithSpec(spec),
+	)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err1 := container.Delete(context.TODO()); err == nil && err1 != nil {
+			err = errors.Wrapf(err1, "failed to delete container %s", id)
+		}
+	}()
+
+	fixProcessOutput(&process)
+	cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
+	if meta.Tty {
+		cioOpts = append(cioOpts, cio.WithTerminal)
+	}
+
+	task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), containerd.WithRootFS([]mount.Mount{{
+		Source:  rootfsPath,
+		Type:    "bind",
+		Options: []string{"rbind"},
+	}}))
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if _, err1 := task.Delete(context.TODO(), containerd.WithProcessKill); err == nil && err1 != nil {
+			err = errors.Wrapf(err1, "failed to delete task %s", id)
+		}
+	}()
+
+	if nn, ok := namespace.(OnCreateRuntimer); ok {
+		if err := nn.OnCreateRuntime(task.Pid()); err != nil {
+			return err
+		}
+	}
+
+	trace.SpanFromContext(ctx).AddEvent("Container created")
+	err = w.runProcess(ctx, task, process.Resize, process.Signal, func() {
+		startedOnce.Do(func() {
+			trace.SpanFromContext(ctx).AddEvent("Container started")
+			if started != nil {
+				close(started)
+			}
+		})
+	})
+	return err
+}
+
+func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) {
+	meta := process.Meta
+
+	// first verify the container is running, if we get an error assume the container
+	// is in the process of being created and check again every 100ms or until
+	// context is canceled.
+
+	var container containerd.Container
+	var task containerd.Task
+	for {
+		w.mu.Lock()
+		done, ok := w.running[id]
+		w.mu.Unlock()
+
+		if !ok {
+			return errors.Errorf("container %s not found", id)
+		}
+
+		if container == nil {
+			container, _ = w.client.LoadContainer(ctx, id)
+		}
+		if container != nil && task == nil {
+			task, _ = container.Task(ctx, nil)
+		}
+		if task != nil {
+			status, _ := task.Status(ctx)
+			if status.Status == containerd.Running {
+				break
+			}
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case err, ok := <-done:
+			if !ok || err == nil {
+				return errors.Errorf("container %s has stopped", id)
+			}
+			return errors.Wrapf(err, "container %s has exited with error", id)
+		case <-time.After(100 * time.Millisecond):
+			continue
+		}
+	}
+
+	spec, err := container.Spec(ctx)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	proc := spec.Process
+
+	// TODO how do we get rootfsPath for oci.GetUser in case user passed in username rather than uid:gid?
+	// For now only support uid:gid
+	if meta.User != "" {
+		uid, gid, err := oci.ParseUIDGID(meta.User)
+		if err != nil {
+			return errors.WithStack(err)
+		}
+		proc.User = specs.User{
+			UID:            uid,
+			GID:            gid,
+			AdditionalGids: []uint32{},
+		}
+	}
+
+	proc.Terminal = meta.Tty
+	proc.Args = meta.Args
+	if meta.Cwd != "" {
+		spec.Process.Cwd = meta.Cwd
+	}
+	if len(process.Meta.Env) > 0 {
+		spec.Process.Env = process.Meta.Env
+	}
+
+	fixProcessOutput(&process)
+	cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
+	if meta.Tty {
+		cioOpts = append(cioOpts, cio.WithTerminal)
+	}
+
+	taskProcess, err := task.Exec(ctx, identity.NewID(), proc, cio.NewCreator(cioOpts...))
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	err = w.runProcess(ctx, taskProcess, process.Resize, process.Signal, nil)
+	return err
+}
+
+func fixProcessOutput(process *executor.ProcessInfo) {
+	// It seems like if containerd has one of stdin, stdout or stderr then the
+	// others need to be present as well otherwise we get this error:
+	// failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown
+	// So just stub out any missing output
+	if process.Stdout == nil {
+		process.Stdout = &nopCloser{io.Discard}
+	}
+	if process.Stderr == nil {
+		process.Stderr = &nopCloser{io.Discard}
+	}
+}
+
+func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Process, resize <-chan executor.WinSize, signal <-chan syscall.Signal, started func()) error {
+	// Not using `ctx` here because the context passed only affects the statusCh which we
+	// don't want cancelled when ctx.Done is sent.  We want to process statusCh on cancel.
+	statusCh, err := p.Wait(context.Background())
+	if err != nil {
+		return err
+	}
+
+	io := p.IO()
+	defer func() {
+		io.Wait()
+		io.Close()
+	}()
+
+	err = p.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	if started != nil {
+		started()
+	}
+
+	p.CloseIO(ctx, containerd.WithStdinCloser)
+
+	// handle signals (and resize) in separate go loop so it does not
+	// potentially block the container cancel/exit status loop below.
+	eventCtx, eventCancel := context.WithCancel(ctx)
+	defer eventCancel()
+	go func() {
+		for {
+			select {
+			case <-eventCtx.Done():
+				return
+			case size, ok := <-resize:
+				if !ok {
+					return // chan closed
+				}
+				err = p.Resize(eventCtx, size.Cols, size.Rows)
+				if err != nil {
+					bklog.G(eventCtx).Warnf("Failed to resize %s: %s", p.ID(), err)
+				}
+			}
+		}
+	}()
+	go func() {
+		for {
+			select {
+			case <-eventCtx.Done():
+				return
+			case sig, ok := <-signal:
+				if !ok {
+					return // chan closed
+				}
+				err = p.Kill(eventCtx, sig)
+				if err != nil {
+					bklog.G(eventCtx).Warnf("Failed to signal %s: %s", p.ID(), err)
+				}
+			}
+		}
+	}()
+
+	var cancel func()
+	var killCtxDone <-chan struct{}
+	ctxDone := ctx.Done()
+	for {
+		select {
+		case <-ctxDone:
+			ctxDone = nil
+			var killCtx context.Context
+			killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+			killCtxDone = killCtx.Done()
+			p.Kill(killCtx, syscall.SIGKILL)
+			io.Cancel()
+		case status := <-statusCh:
+			if cancel != nil {
+				cancel()
+			}
+			trace.SpanFromContext(ctx).AddEvent(
+				"Container exited",
+				trace.WithAttributes(
+					attribute.Int("exit.code", int(status.ExitCode())),
+				),
+			)
+			if status.ExitCode() != 0 {
+				exitErr := &gatewayapi.ExitError{
+					ExitCode: status.ExitCode(),
+					Err:      status.Error(),
+				}
+				if status.ExitCode() == gatewayapi.UnknownExitStatus && status.Error() != nil {
+					exitErr.Err = errors.Wrap(status.Error(), "failure waiting for process")
+				}
+				select {
+				case <-ctx.Done():
+					exitErr.Err = errors.Wrap(ctx.Err(), exitErr.Error())
+				default:
+				}
+				return exitErr
+			}
+			return nil
+		case <-killCtxDone:
+			if cancel != nil {
+				cancel()
+			}
+			io.Cancel()
+			return errors.Errorf("failed to kill process on cancel")
+		}
+	}
+}
+
+type nopCloser struct {
+	io.Writer
+}
+
+func (c *nopCloser) Close() error {
+	return nil
+}
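
The `OnCreateRuntimer` interface documented above lets a network provider hook container creation without relying on OCI hooks. A minimal sketch of a namespace wrapper that satisfies it, assuming a hypothetical `attach` callback (for example, one that moves a veth interface into the task's network namespace); the wrapper embeds an existing `network.Namespace`, so only the callback is new:

```go
package mynetwork

import "github.com/moby/buildkit/util/network"

// hookedNamespace wraps an existing network.Namespace and additionally
// implements OnCreateRuntimer, so the containerd executor will call
// OnCreateRuntime with the task PID after the container is created and
// before the user process starts.
type hookedNamespace struct {
	network.Namespace
	attach func(pid uint32) error // hypothetical setup step, e.g. move a veth into the task's netns
}

func (n *hookedNamespace) OnCreateRuntime(pid uint32) error {
	return n.attach(pid)
}
```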

+ 298 - 0
vendor/github.com/moby/buildkit/exporter/oci/export.go

@@ -0,0 +1,298 @@
+package oci
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	archiveexporter "github.com/containerd/containerd/images/archive"
+	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/remotes"
+	"github.com/docker/distribution/reference"
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	"github.com/moby/buildkit/cache"
+	cacheconfig "github.com/moby/buildkit/cache/config"
+	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/exporter/containerimage"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
+	"github.com/moby/buildkit/session"
+	sessioncontent "github.com/moby/buildkit/session/content"
+	"github.com/moby/buildkit/session/filesync"
+	"github.com/moby/buildkit/util/compression"
+	"github.com/moby/buildkit/util/contentutil"
+	"github.com/moby/buildkit/util/grpcerrors"
+	"github.com/moby/buildkit/util/leaseutil"
+	"github.com/moby/buildkit/util/progress"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+)
+
+type ExporterVariant string
+
+const (
+	VariantOCI    = "oci"
+	VariantDocker = "docker"
+)
+
+const (
+	keyTar = "tar"
+)
+
+type Opt struct {
+	SessionManager *session.Manager
+	ImageWriter    *containerimage.ImageWriter
+	Variant        ExporterVariant
+	LeaseManager   leases.Manager
+}
+
+type imageExporter struct {
+	opt Opt
+}
+
+func New(opt Opt) (exporter.Exporter, error) {
+	im := &imageExporter{opt: opt}
+	return im, nil
+}
+
+func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
+	i := &imageExporterInstance{
+		imageExporter: e,
+		tar:           true,
+		opts: containerimage.ImageCommitOpts{
+			RefCfg: cacheconfig.RefConfig{
+				Compression: compression.New(compression.Default),
+			},
+			BuildInfo: true,
+			OCITypes:  e.opt.Variant == VariantOCI,
+		},
+	}
+
+	opt, err := i.opts.Load(opt)
+	if err != nil {
+		return nil, err
+	}
+
+	for k, v := range opt {
+		switch k {
+		case keyTar:
+			if v == "" {
+				i.tar = true
+				continue
+			}
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
+			}
+			i.tar = b
+		default:
+			if i.meta == nil {
+				i.meta = make(map[string][]byte)
+			}
+			i.meta[k] = []byte(v)
+		}
+	}
+	return i, nil
+}
+
+type imageExporterInstance struct {
+	*imageExporter
+	opts containerimage.ImageCommitOpts
+	tar  bool
+	meta map[string][]byte
+}
+
+func (e *imageExporterInstance) Name() string {
+	return fmt.Sprintf("exporting to %s image format", e.opt.Variant)
+}
+
+func (e *imageExporterInstance) Config() *exporter.Config {
+	return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
+}
+
+func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
+	if e.opt.Variant == VariantDocker && len(src.Refs) > 0 {
+		return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
+	}
+
+	if src.Metadata == nil {
+		src.Metadata = make(map[string][]byte)
+	}
+	for k, v := range e.meta {
+		src.Metadata[k] = v
+	}
+
+	opts := e.opts
+	as, _, err := containerimage.ParseAnnotations(src.Metadata)
+	if err != nil {
+		return nil, nil, err
+	}
+	opts.Annotations = opts.Annotations.Merge(as)
+
+	ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if descref == nil {
+			done(context.TODO())
+		}
+	}()
+
+	desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err == nil {
+			descref = containerimage.NewDescriptorReference(*desc, done)
+		}
+	}()
+
+	if desc.Annotations == nil {
+		desc.Annotations = map[string]string{}
+	}
+	if _, ok := desc.Annotations[ocispecs.AnnotationCreated]; !ok {
+		tm := time.Now()
+		if opts.Epoch != nil {
+			tm = *opts.Epoch
+		}
+		desc.Annotations[ocispecs.AnnotationCreated] = tm.UTC().Format(time.RFC3339)
+	}
+
+	resp := make(map[string]string)
+
+	resp[exptypes.ExporterImageDigestKey] = desc.Digest.String()
+	if v, ok := desc.Annotations[exptypes.ExporterConfigDigestKey]; ok {
+		resp[exptypes.ExporterImageConfigDigestKey] = v
+		delete(desc.Annotations, exptypes.ExporterConfigDigestKey)
+	}
+
+	dtdesc, err := json.Marshal(desc)
+	if err != nil {
+		return nil, nil, err
+	}
+	resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc)
+
+	if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok {
+		e.opts.ImageName = string(n)
+	}
+
+	names, err := normalizedNames(e.opts.ImageName)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(names) != 0 {
+		resp["image.name"] = strings.Join(names, ",")
+	}
+
+	expOpts := []archiveexporter.ExportOpt{archiveexporter.WithManifest(*desc, names...)}
+	switch e.opt.Variant {
+	case VariantOCI:
+		expOpts = append(expOpts, archiveexporter.WithAllPlatforms(), archiveexporter.WithSkipDockerManifest())
+	case VariantDocker:
+	default:
+		return nil, nil, errors.Errorf("invalid variant %q", e.opt.Variant)
+	}
+
+	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
+	if src.Ref != nil {
+		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+		if err != nil {
+			return nil, nil, err
+		}
+		remote := remotes[0]
+		// unlazy before tar export as the tar writer does not handle
+		// layer blobs in parallel (whereas unlazy does)
+		if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
+			if err := unlazier.Unlazy(ctx); err != nil {
+				return nil, nil, err
+			}
+		}
+		for _, desc := range remote.Descriptors {
+			mprovider.Add(desc.Digest, remote.Provider)
+		}
+	}
+	if len(src.Refs) > 0 {
+		for _, r := range src.Refs {
+			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+			if err != nil {
+				return nil, nil, err
+			}
+			remote := remotes[0]
+			if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
+				if err := unlazier.Unlazy(ctx); err != nil {
+					return nil, nil, err
+				}
+			}
+			for _, desc := range remote.Descriptors {
+				mprovider.Add(desc.Digest, remote.Provider)
+			}
+		}
+	}
+
+	if e.tar {
+		w, err := filesync.CopyFileWriter(ctx, resp, caller)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		report := progress.OneOff(ctx, "sending tarball")
+		if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil {
+			w.Close()
+			if grpcerrors.Code(err) == codes.AlreadyExists {
+				return resp, nil, report(nil)
+			}
+			return nil, nil, report(err)
+		}
+		err = w.Close()
+		if grpcerrors.Code(err) == codes.AlreadyExists {
+			return resp, nil, report(nil)
+		}
+		if err != nil {
+			return nil, nil, report(err)
+		}
+		report(nil)
+	} else {
+		ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto")
+		store := sessioncontent.NewCallerStore(caller, "export")
+		if err != nil {
+			return nil, nil, err
+		}
+		err := contentutil.CopyChain(ctx, store, mprovider, *desc)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return resp, nil, nil
+}
+
+func normalizedNames(name string) ([]string, error) {
+	if name == "" {
+		return nil, nil
+	}
+	names := strings.Split(name, ",")
+	var tagNames = make([]string, len(names))
+	for i, name := range names {
+		parsed, err := reference.ParseNormalizedNamed(name)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse %s", name)
+		}
+		tagNames[i] = reference.TagNameOnly(parsed).String()
+	}
+	return tagNames, nil
+}
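
The normalization in `normalizedNames` follows standard Docker reference rules: short names gain the `docker.io/library/` prefix and an explicit `:latest` tag when none is given. A small sketch showing the effect, with image names chosen purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Each name is normalized the same way normalizedNames does it:
	// ParseNormalizedNamed expands short names, TagNameOnly adds ":latest".
	for _, name := range []string{"ubuntu", "example.com/app"} {
		parsed, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			panic(err)
		}
		fmt.Println(reference.TagNameOnly(parsed).String())
	}
	// Output:
	// docker.io/library/ubuntu:latest
	// example.com/app:latest
}
```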

Some files were not shown because of the large number of changes in this diff.